Move ADM initialization into WebRtcVoiceEngine

Bug: webrtc:4690
Change-Id: I3b8950fdb13835964c5bf41162731eff5048bf1a
Reviewed-on: https://webrtc-review.googlesource.com/23820
Commit-Queue: Fredrik Solenberg <solenberg@webrtc.org>
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20823}
Author: Fredrik Solenberg, 2017-11-21 20:33:05 +01:00 (committed by Commit Bot)
Parent: 37e489c985
Commit: d319534143

20 changed files with 114 additions and 280 deletions
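
In effect, VoEBase::Init() no longer creates, initializes, or registers an audio callback with the ADM, and AudioState no longer hooks its transport up to the device; WebRtcVoiceEngine (or, in the tests, the fixture) now owns that sequence. A minimal caller-side sketch, mirroring the pattern the updated tests below follow (fake_audio_device, voe_base, audio_state_config and decoder_factory are assumed to be set up as in those tests):

  EXPECT_EQ(0, fake_audio_device->Init());  // The caller initializes the ADM now.
  EXPECT_EQ(0, voe_base->Init(fake_audio_device.get(), audio_processing.get(),
                              decoder_factory));  // Passing a null ADM is no longer supported.
  auto audio_state = webrtc::AudioState::Create(audio_state_config);
  // AudioState no longer registers itself with the ADM; connect the audio path explicitly.
  fake_audio_device->RegisterAudioCallback(audio_state->audio_transport());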

View File

@@ -75,7 +75,6 @@ struct ConfigHelper {
         audio_mixer_(new rtc::RefCountedObject<MockAudioMixer>()) {
     using testing::Invoke;
-    EXPECT_CALL(voice_engine_, audio_device_module());
     EXPECT_CALL(voice_engine_, audio_transport());
     AudioState::Config config;

View File

@@ -148,7 +148,6 @@ struct ConfigHelper {
         audio_encoder_(nullptr) {
     using testing::Invoke;
-    EXPECT_CALL(voice_engine_, audio_device_module());
     EXPECT_CALL(voice_engine_, audio_transport());
     AudioState::Config config;

View File

@@ -29,13 +29,6 @@ AudioState::AudioState(const AudioState::Config& config)
           config_.audio_mixer) {
   process_thread_checker_.DetachFromThread();
   RTC_DCHECK(config_.audio_mixer);
-  auto* const device = voe_base_->audio_device_module();
-  RTC_DCHECK(device);
-  // This is needed for the Chrome implementation of RegisterAudioCallback.
-  device->RegisterAudioCallback(nullptr);
-  device->RegisterAudioCallback(&audio_transport_proxy_);
 }

 AudioState::~AudioState() {

View File

@@ -26,22 +26,9 @@ const int kBytesPerSample = 2;
 struct ConfigHelper {
   ConfigHelper() : audio_mixer(AudioMixerImpl::Create()) {
-    EXPECT_CALL(mock_voice_engine, audio_device_module())
-        .Times(testing::AtLeast(1));
     EXPECT_CALL(mock_voice_engine, audio_transport())
         .WillRepeatedly(testing::Return(&audio_transport));
-    auto device = static_cast<MockAudioDeviceModule*>(
-        voice_engine().audio_device_module());
-    // Populate the audio transport proxy pointer to the most recent
-    // transport connected to the Audio Device.
-    ON_CALL(*device, RegisterAudioCallback(testing::_))
-        .WillByDefault(testing::Invoke([this](AudioTransport* transport) {
-          registered_audio_transport = transport;
-          return 0;
-        }));
     audio_state_config.voice_engine = &mock_voice_engine;
     audio_state_config.audio_mixer = audio_mixer;
     audio_state_config.audio_processing =

@@ -51,14 +38,12 @@ struct ConfigHelper {
   MockVoiceEngine& voice_engine() { return mock_voice_engine; }
   rtc::scoped_refptr<AudioMixer> mixer() { return audio_mixer; }
   MockAudioTransport& original_audio_transport() { return audio_transport; }
-  AudioTransport* audio_transport_proxy() { return registered_audio_transport; }

  private:
   testing::StrictMock<MockVoiceEngine> mock_voice_engine;
   AudioState::Config audio_state_config;
   rtc::scoped_refptr<AudioMixer> audio_mixer;
   MockAudioTransport audio_transport;
-  AudioTransport* registered_audio_transport = nullptr;
 };

 class FakeAudioSource : public AudioMixer::Source {

@@ -112,7 +97,7 @@ TEST(AudioStateAudioPathTest, RecordedAudioArrivesAtOriginalTransport) {
                             kNumberOfChannels, kSampleRate, 0, 0, 0, false,
                             testing::Ref(new_mic_level)));
-  helper.audio_transport_proxy()->RecordedDataIsAvailable(
+  audio_state->audio_transport()->RecordedDataIsAvailable(
       nullptr, kSampleRate / 100, kBytesPerSample, kNumberOfChannels,
       kSampleRate, 0, 0, 0, false, new_mic_level);
 }

@@ -141,7 +126,7 @@ TEST(AudioStateAudioPathTest,
   size_t n_samples_out;
   int64_t elapsed_time_ms;
   int64_t ntp_time_ms;
-  helper.audio_transport_proxy()->NeedMorePlayData(
+  audio_state->audio_transport()->NeedMorePlayData(
       kSampleRate / 100, kBytesPerSample, kNumberOfChannels, kSampleRate,
       audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms);
 }

View File

@@ -176,6 +176,7 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec,
   fake_audio_device = rtc::MakeUnique<FakeAudioDevice>(
       FakeAudioDevice::CreatePulsedNoiseCapturer(256, 48000),
       FakeAudioDevice::CreateDiscardRenderer(48000), audio_rtp_speed);
+  EXPECT_EQ(0, fake_audio_device->Init());
   EXPECT_EQ(0, voe_base->Init(fake_audio_device.get(), audio_processing.get(),
                               decoder_factory_));
   VoEBase::ChannelConfig config;

@@ -189,9 +190,11 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec,
   send_audio_state_config.audio_processing = audio_processing;
   Call::Config sender_config(event_log_.get());
-  sender_config.audio_state = AudioState::Create(send_audio_state_config);
+  auto audio_state = AudioState::Create(send_audio_state_config);
+  fake_audio_device->RegisterAudioCallback(audio_state->audio_transport());
+  sender_config.audio_state = audio_state;
   Call::Config receiver_config(event_log_.get());
-  receiver_config.audio_state = sender_config.audio_state;
+  receiver_config.audio_state = audio_state;
   CreateCalls(sender_config, receiver_config);
   std::copy_if(std::begin(payload_type_map_), std::end(payload_type_map_),

View File

@@ -41,7 +41,6 @@ struct CallHelper {
     audio_state_config.voice_engine = &voice_engine_;
     audio_state_config.audio_mixer = webrtc::AudioMixerImpl::Create();
     audio_state_config.audio_processing = webrtc::AudioProcessing::Create();
-    EXPECT_CALL(voice_engine_, audio_device_module());
     EXPECT_CALL(voice_engine_, audio_transport());
     webrtc::Call::Config config(&event_log_);
     config.audio_state = webrtc::AudioState::Create(audio_state_config);

View File

@@ -32,67 +32,17 @@ namespace adm_helpers {
 #define AUDIO_DEVICE_ID (0u)
 #endif  // defined(WEBRTC_WIN)

-void SetRecordingDevice(AudioDeviceModule* adm) {
+void Init(AudioDeviceModule* adm) {
   RTC_DCHECK(adm);
-  // Save recording status and stop recording.
-  const bool was_recording = adm->Recording();
-  if (was_recording && adm->StopRecording() != 0) {
-    RTC_LOG(LS_ERROR) << "Unable to stop recording.";
-    return;
-  }
-  // Set device to default.
-  if (adm->SetRecordingDevice(AUDIO_DEVICE_ID) != 0) {
-    RTC_LOG(LS_ERROR) << "Unable to set recording device.";
-    return;
-  }
-  // Init microphone, so user can do volume settings etc.
-  if (adm->InitMicrophone() != 0) {
-    RTC_LOG(LS_ERROR) << "Unable to access microphone.";
-  }
-  // Set number of channels
-  bool available = false;
-  if (adm->StereoRecordingIsAvailable(&available) != 0) {
-    RTC_LOG(LS_ERROR) << "Failed to query stereo recording.";
-  }
-  if (adm->SetStereoRecording(available) != 0) {
-    RTC_LOG(LS_ERROR) << "Failed to set stereo recording mode.";
-  }
-  // Restore recording if it was enabled already when calling this function.
-  if (was_recording) {
-    if (adm->InitRecording() != 0) {
-      RTC_LOG(LS_ERROR) << "Failed to initialize recording.";
-      return;
-    }
-    if (adm->StartRecording() != 0) {
-      RTC_LOG(LS_ERROR) << "Failed to start recording.";
-      return;
-    }
-  }
-  RTC_LOG(LS_INFO) << "Set recording device.";
-}
-
-void SetPlayoutDevice(AudioDeviceModule* adm) {
-  RTC_DCHECK(adm);
-  // Save playing status and stop playout.
-  const bool was_playing = adm->Playing();
-  if (was_playing && adm->StopPlayout() != 0) {
-    RTC_LOG(LS_ERROR) << "Unable to stop playout.";
-  }
-  // Set device.
-  if (adm->SetPlayoutDevice(AUDIO_DEVICE_ID) != 0) {
-    RTC_LOG(LS_ERROR) << "Unable to set playout device.";
-    return;
-  }
-  // Init speaker, so user can do volume settings etc.
-  if (adm->InitSpeaker() != 0) {
-    RTC_LOG(LS_ERROR) << "Unable to access speaker.";
-  }
+  RTC_CHECK_EQ(0, adm->Init()) << "Failed to initialize the ADM.";
+
+  // Playout device.
+  {
+    if (adm->SetPlayoutDevice(AUDIO_DEVICE_ID) != 0) {
+      RTC_LOG(LS_ERROR) << "Unable to set playout device.";
+      return;
+    }
+    if (adm->InitSpeaker() != 0) {
+      RTC_LOG(LS_ERROR) << "Unable to access speaker.";
+    }

@@ -105,21 +55,27 @@ void SetPlayoutDevice(AudioDeviceModule* adm) {
-  if (adm->SetStereoPlayout(available) != 0) {
-    RTC_LOG(LS_ERROR) << "Failed to set stereo playout mode.";
-  }
-  // Restore recording if it was enabled already when calling this function.
-  if (was_playing) {
-    if (adm->InitPlayout() != 0) {
-      RTC_LOG(LS_ERROR) << "Failed to initialize playout.";
-      return;
-    }
-    if (adm->StartPlayout() != 0) {
-      RTC_LOG(LS_ERROR) << "Failed to start playout.";
-      return;
-    }
-  }
-  RTC_LOG(LS_INFO) << "Set playout device.";
+    if (adm->SetStereoPlayout(available) != 0) {
+      RTC_LOG(LS_ERROR) << "Failed to set stereo playout mode.";
+    }
+  }
+
+  // Recording device.
+  {
+    if (adm->SetRecordingDevice(AUDIO_DEVICE_ID) != 0) {
+      RTC_LOG(LS_ERROR) << "Unable to set recording device.";
+      return;
+    }
+    if (adm->InitMicrophone() != 0) {
+      RTC_LOG(LS_ERROR) << "Unable to access microphone.";
+    }
+
+    // Set number of channels
+    bool available = false;
+    if (adm->StereoRecordingIsAvailable(&available) != 0) {
+      RTC_LOG(LS_ERROR) << "Failed to query stereo recording.";
+    }
+    if (adm->SetStereoRecording(available) != 0) {
+      RTC_LOG(LS_ERROR) << "Failed to set stereo recording mode.";
+    }
+  }
 }

 }  // namespace adm_helpers
 }  // namespace webrtc

View File

@@ -19,8 +19,7 @@ class AudioDeviceModule;
 namespace adm_helpers {

-void SetRecordingDevice(AudioDeviceModule* adm);
-void SetPlayoutDevice(AudioDeviceModule* adm);
+void Init(AudioDeviceModule* adm);

 }  // namespace adm_helpers
 }  // namespace webrtc
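
The two per-device helpers collapse into a single Init() that now also calls AudioDeviceModule::Init(). A hypothetical standalone use, assuming an internally built ADM as WebRtcVoiceEngine::Init() does further down:

  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
      webrtc::AudioDeviceModule::Create(
          webrtc::AudioDeviceModule::kPlatformDefaultAudio);
  webrtc::adm_helpers::Init(adm.get());  // adm->Init() plus default playout/recording devices.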

View File

@@ -69,9 +69,6 @@ class FakeWebRtcVoiceEngine : public webrtc::VoEBase {
     inited_ = false;
     return 0;
   }
-  webrtc::AudioDeviceModule* audio_device_module() override {
-    return nullptr;
-  }
   webrtc::voe::TransmitMixer* transmit_mixer() override {
     return transmit_mixer_;
   }

View File

@@ -28,6 +28,7 @@
 #include "media/engine/payload_type_mapper.h"
 #include "media/engine/webrtcmediaengine.h"
 #include "media/engine/webrtcvoe.h"
+#include "modules/audio_device/audio_device_impl.h"
 #include "modules/audio_mixer/audio_mixer_impl.h"
 #include "modules/audio_processing/aec_dump/aec_dump_factory.h"
 #include "modules/audio_processing/include/audio_processing.h"

@@ -251,6 +252,12 @@ WebRtcVoiceEngine::~WebRtcVoiceEngine() {
   if (initialized_) {
     StopAecDump();
     voe_wrapper_->base()->Terminate();
+
+    // Stop AudioDevice.
+    adm()->StopPlayout();
+    adm()->StopRecording();
+    adm()->RegisterAudioCallback(nullptr);
+    adm()->Terminate();
   }
 }

@@ -283,15 +290,17 @@ void WebRtcVoiceEngine::Init() {
   channel_config_.enable_voice_pacing = true;

-  RTC_CHECK_EQ(0,
-               voe_wrapper_->base()->Init(adm_.get(), apm(), decoder_factory_));
-  // No ADM supplied? Get the default one from VoE.
-  if (!adm_) {
-    adm_ = voe_wrapper_->base()->audio_device_module();
-  }
-  RTC_DCHECK(adm_);
+#if defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
+  // No ADM supplied? Create a default one.
+  if (!adm_) {
+    adm_ = webrtc::AudioDeviceModule::Create(
+        webrtc::AudioDeviceModule::kPlatformDefaultAudio);
+  }
+#endif  // WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE
+  RTC_CHECK(adm());
+  webrtc::adm_helpers::Init(adm());
+  RTC_CHECK_EQ(0, voe_wrapper_->base()->Init(adm(), apm(), decoder_factory_));

   transmit_mixer_ = voe_wrapper_->base()->transmit_mixer();
   RTC_DCHECK(transmit_mixer_);

@@ -324,15 +333,16 @@ void WebRtcVoiceEngine::Init() {
   // Set default audio devices.
 #if !defined(WEBRTC_IOS)
-  webrtc::adm_helpers::SetRecordingDevice(adm_);
   apm()->Initialize();
-  webrtc::adm_helpers::SetPlayoutDevice(adm_);
 #endif  // !WEBRTC_IOS

   // May be null for VoE injected for testing.
   if (voe()->engine()) {
     audio_state_ = webrtc::AudioState::Create(
         MakeAudioStateConfig(voe(), audio_mixer_, apm_));
+
+    // Connect the ADM to our audio path.
+    adm()->RegisterAudioCallback(audio_state_->audio_transport());
   }

   initialized_ = true;

@@ -708,7 +718,7 @@ int WebRtcVoiceEngine::CreateVoEChannel() {
 webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   RTC_DCHECK(adm_);
-  return adm_;
+  return adm_.get();
 }

 webrtc::AudioProcessing* WebRtcVoiceEngine::apm() const {

View File

@@ -85,23 +85,10 @@ class MockTransmitMixer : public webrtc::voe::TransmitMixer {
 void AdmSetupExpectations(webrtc::test::MockAudioDeviceModule* adm) {
   RTC_DCHECK(adm);
+  // Setup.
   EXPECT_CALL(*adm, AddRef()).Times(1);
-  EXPECT_CALL(*adm, Release())
-      .WillOnce(Return(rtc::RefCountReleaseStatus::kDroppedLastRef));
-#if !defined(WEBRTC_IOS)
-  EXPECT_CALL(*adm, Recording()).WillOnce(Return(false));
-#if defined(WEBRTC_WIN)
-  EXPECT_CALL(*adm, SetRecordingDevice(
-                        testing::Matcher<webrtc::AudioDeviceModule::WindowsDeviceType>(
-                            webrtc::AudioDeviceModule::kDefaultCommunicationDevice)))
-      .WillOnce(Return(0));
-#else
-  EXPECT_CALL(*adm, SetRecordingDevice(0)).WillOnce(Return(0));
-#endif  // #if defined(WEBRTC_WIN)
-  EXPECT_CALL(*adm, InitMicrophone()).WillOnce(Return(0));
-  EXPECT_CALL(*adm, StereoRecordingIsAvailable(testing::_)).WillOnce(Return(0));
-  EXPECT_CALL(*adm, SetStereoRecording(false)).WillOnce(Return(0));
-  EXPECT_CALL(*adm, Playing()).WillOnce(Return(false));
+  EXPECT_CALL(*adm, Init()).WillOnce(Return(0));
 #if defined(WEBRTC_WIN)
   EXPECT_CALL(*adm, SetPlayoutDevice(
                         testing::Matcher<webrtc::AudioDeviceModule::WindowsDeviceType>(

@@ -113,11 +100,29 @@ void AdmSetupExpectations(webrtc::test::MockAudioDeviceModule* adm) {
   EXPECT_CALL(*adm, InitSpeaker()).WillOnce(Return(0));
   EXPECT_CALL(*adm, StereoPlayoutIsAvailable(testing::_)).WillOnce(Return(0));
   EXPECT_CALL(*adm, SetStereoPlayout(false)).WillOnce(Return(0));
-#endif  // #if !defined(WEBRTC_IOS)
+#if defined(WEBRTC_WIN)
+  EXPECT_CALL(*adm, SetRecordingDevice(
+                        testing::Matcher<webrtc::AudioDeviceModule::WindowsDeviceType>(
+                            webrtc::AudioDeviceModule::kDefaultCommunicationDevice)))
+      .WillOnce(Return(0));
+#else
+  EXPECT_CALL(*adm, SetRecordingDevice(0)).WillOnce(Return(0));
+#endif  // #if defined(WEBRTC_WIN)
+  EXPECT_CALL(*adm, InitMicrophone()).WillOnce(Return(0));
+  EXPECT_CALL(*adm, StereoRecordingIsAvailable(testing::_)).WillOnce(Return(0));
+  EXPECT_CALL(*adm, SetStereoRecording(false)).WillOnce(Return(0));
   EXPECT_CALL(*adm, BuiltInAECIsAvailable()).WillOnce(Return(false));
   EXPECT_CALL(*adm, BuiltInAGCIsAvailable()).WillOnce(Return(false));
   EXPECT_CALL(*adm, BuiltInNSIsAvailable()).WillOnce(Return(false));
   EXPECT_CALL(*adm, SetAGC(true)).WillOnce(Return(0));
+  // Teardown.
+  EXPECT_CALL(*adm, StopPlayout()).WillOnce(Return(0));
+  EXPECT_CALL(*adm, StopRecording()).WillOnce(Return(0));
+  EXPECT_CALL(*adm, RegisterAudioCallback(nullptr)).WillOnce(Return(0));
+  EXPECT_CALL(*adm, Terminate()).WillOnce(Return(0));
+  EXPECT_CALL(*adm, Release())
+      .WillOnce(Return(rtc::RefCountReleaseStatus::kDroppedLastRef));
 }
 }  // namespace

View File

@@ -75,6 +75,8 @@ void CallTest::RunBaseTest(BaseTest* test) {
       audio_state_config.audio_mixer = AudioMixerImpl::Create();
       audio_state_config.audio_processing = apm_send_;
       send_config.audio_state = AudioState::Create(audio_state_config);
+      fake_send_audio_device_->RegisterAudioCallback(
+          send_config.audio_state->audio_transport());
     }
     CreateSenderCall(send_config);
     if (sender_call_transport_controller_ != nullptr) {

@@ -89,7 +91,8 @@ void CallTest::RunBaseTest(BaseTest* test) {
       audio_state_config.audio_mixer = AudioMixerImpl::Create();
       audio_state_config.audio_processing = apm_recv_;
       recv_config.audio_state = AudioState::Create(audio_state_config);
+      fake_recv_audio_device_->RegisterAudioCallback(
+          recv_config.audio_state->audio_transport());
     }
     CreateReceiverCall(recv_config);
   }
   test->OnCallsCreated(sender_call_.get(), receiver_call_.get());

@@ -427,6 +430,7 @@ void CallTest::SetFakeVideoCaptureRotation(VideoRotation rotation) {
 void CallTest::CreateVoiceEngines() {
   voe_send_.voice_engine = VoiceEngine::Create();
   voe_send_.base = VoEBase::GetInterface(voe_send_.voice_engine);
+  EXPECT_EQ(0, fake_send_audio_device_->Init());
   EXPECT_EQ(0, voe_send_.base->Init(fake_send_audio_device_.get(),
                                     apm_send_.get(), decoder_factory_));
   VoEBase::ChannelConfig config;

@@ -436,6 +440,7 @@ void CallTest::CreateVoiceEngines() {
   voe_recv_.voice_engine = VoiceEngine::Create();
   voe_recv_.base = VoEBase::GetInterface(voe_recv_.voice_engine);
+  EXPECT_EQ(0, fake_recv_audio_device_->Init());
   EXPECT_EQ(0, voe_recv_.base->Init(fake_recv_audio_device_.get(),
                                     apm_recv_.get(), decoder_factory_));
   voe_recv_.channel_id = voe_recv_.base->CreateChannel();

View File

@@ -303,13 +303,9 @@ int32_t FakeAudioDevice::StopRecording() {
 }

 int32_t FakeAudioDevice::Init() {
-  // TODO(solenberg): Temporarily allow multiple init calls.
-  if (!inited_) {
   RTC_CHECK(tick_->StartTimer(true, kFrameLengthMs / speed_));
   thread_.Start();
   thread_.SetPriority(rtc::kHighPriority);
-    inited_ = true;
-  }
   return 0;
 }

View File

@@ -137,7 +137,6 @@ class FakeAudioDevice : public FakeAudioDeviceModule {
   std::unique_ptr<EventTimerWrapper> tick_;
   rtc::PlatformThread thread_;
-  bool inited_ = false;
 };
 }  // namespace test
 }  // namespace webrtc

View File

@@ -13,7 +13,6 @@
 #include <memory>

-#include "modules/audio_device/include/mock_audio_device.h"
 #include "modules/audio_device/include/mock_audio_transport.h"
 #include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
 #include "test/gmock.h"

@@ -63,8 +62,6 @@ class MockVoiceEngine : public VoiceEngineImpl {
           return proxy;
         }));

-    ON_CALL(*this, audio_device_module())
-        .WillByDefault(testing::Return(&mock_audio_device_));
     ON_CALL(*this, audio_transport())
         .WillByDefault(testing::Return(&mock_audio_transport_));
   }

@@ -97,7 +94,6 @@ class MockVoiceEngine : public VoiceEngineImpl {
               int(AudioDeviceModule* external_adm,
                   AudioProcessing* external_apm,
                   const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory));
-  MOCK_METHOD0(audio_device_module, AudioDeviceModule*());
   MOCK_METHOD0(transmit_mixer, voe::TransmitMixer*());
   MOCK_METHOD0(Terminate, int());
   MOCK_METHOD0(CreateChannel, int());

@@ -120,7 +116,6 @@ class MockVoiceEngine : public VoiceEngineImpl {
   std::map<int, std::unique_ptr<MockRtpRtcp>> mock_rtp_rtcps_;

-  MockAudioDeviceModule mock_audio_device_;
   MockAudioTransport mock_audio_transport_;
 };
 }  // namespace test

View File

@@ -92,11 +92,13 @@ struct VoiceEngineState {
 void CreateVoiceEngine(
     VoiceEngineState* voe,
+    webrtc::AudioDeviceModule* adm,
     webrtc::AudioProcessing* apm,
     rtc::scoped_refptr<webrtc::AudioDecoderFactory> decoder_factory) {
   voe->voice_engine = webrtc::VoiceEngine::Create();
   voe->base = webrtc::VoEBase::GetInterface(voe->voice_engine);
-  EXPECT_EQ(0, voe->base->Init(nullptr, apm, decoder_factory));
+  EXPECT_EQ(0, adm->Init());
+  EXPECT_EQ(0, voe->base->Init(adm, apm, decoder_factory));
   webrtc::VoEBase::ChannelConfig config;
   config.enable_voice_pacing = true;
   voe->send_channel_id = voe->base->CreateChannel(config);

@@ -1968,6 +1970,7 @@ void VideoQualityTest::SetupAudio(int send_channel_id,
 void VideoQualityTest::RunWithRenderers(const Params& params) {
   std::unique_ptr<test::LayerFilteringTransport> send_transport;
   std::unique_ptr<test::DirectTransport> recv_transport;
+  std::unique_ptr<test::FakeAudioDevice> fake_audio_device;
   ::VoiceEngineState voe;
   std::unique_ptr<test::VideoRenderer> local_preview;
   std::vector<std::unique_ptr<test::VideoRenderer>> loopback_renderers;

@@ -1982,16 +1985,24 @@ void VideoQualityTest::RunWithRenderers(const Params& params) {
   Call::Config call_config(event_log_.get());
   call_config.bitrate_config = params_.call.call_bitrate_config;

+  fake_audio_device.reset(new test::FakeAudioDevice(
+      test::FakeAudioDevice::CreatePulsedNoiseCapturer(32000, 48000),
+      test::FakeAudioDevice::CreateDiscardRenderer(48000),
+      1.f));
+
   rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing(
       webrtc::AudioProcessing::Create());
   if (params_.audio.enabled) {
-    CreateVoiceEngine(&voe, audio_processing.get(), decoder_factory_);
+    CreateVoiceEngine(&voe, fake_audio_device.get(), audio_processing.get(),
+                      decoder_factory_);
     AudioState::Config audio_state_config;
     audio_state_config.voice_engine = voe.voice_engine;
     audio_state_config.audio_mixer = AudioMixerImpl::Create();
     audio_state_config.audio_processing = audio_processing;
     call_config.audio_state = AudioState::Create(audio_state_config);
+    fake_audio_device->RegisterAudioCallback(
+        call_config.audio_state->audio_transport());
   }

   CreateCalls(call_config, call_config);

View File

@@ -92,15 +92,10 @@ class WEBRTC_DLLEXPORT VoEBase {
   //   functionality in a separate (reference counted) module.
   // - The AudioProcessing module handles capture-side processing.
   // - An AudioDecoderFactory - used to create audio decoders.
-  // If NULL is passed for ADM, VoiceEngine
-  // will create its own. Returns -1 in case of an error, 0 otherwise.
   virtual int Init(
-      AudioDeviceModule* external_adm,
-      AudioProcessing* external_apm,
+      AudioDeviceModule* audio_device,
+      AudioProcessing* audio_processing,
       const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) = 0;

-  // This method is WIP - DO NOT USE!
-  // Returns NULL before Init() is called.
-  virtual AudioDeviceModule* audio_device_module() = 0;
-
   // This method is WIP - DO NOT USE!
   // Returns NULL before Init() is called.

View File

@@ -142,91 +142,17 @@ void VoEBaseImpl::PullRenderData(int bits_per_sample,
 }

 int VoEBaseImpl::Init(
-    AudioDeviceModule* external_adm,
+    AudioDeviceModule* audio_device,
     AudioProcessing* audio_processing,
     const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+  RTC_DCHECK(audio_device);
   RTC_DCHECK(audio_processing);
   rtc::CritScope cs(shared_->crit_sec());
-  WebRtcSpl_Init();
   if (shared_->process_thread()) {
     shared_->process_thread()->Start();
   }

-  // Create an internal ADM if the user has not added an external
-  // ADM implementation as input to Init().
-  if (external_adm == nullptr) {
-#if !defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
-    return -1;
-#else
-    // Create the internal ADM implementation.
-    shared_->set_audio_device(AudioDeviceModule::Create(
-        AudioDeviceModule::kPlatformDefaultAudio));
-    if (shared_->audio_device() == nullptr) {
-      RTC_LOG(LS_ERROR) << "Init() failed to create the ADM";
-      return -1;
-    }
-#endif  // WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE
-  } else {
-    // Use the already existing external ADM implementation.
-    shared_->set_audio_device(external_adm);
-    RTC_LOG_F(LS_INFO)
-        << "An external ADM implementation will be used in VoiceEngine";
-  }
-
-  bool available = false;
-
-  // --------------------
-  // Reinitialize the ADM
-
-  // Register the AudioTransport implementation
-  if (shared_->audio_device()->RegisterAudioCallback(this) != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to register audio callback for the ADM";
-  }
-
-  // ADM initialization
-  if (shared_->audio_device()->Init() != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to initialize the ADM";
-    return -1;
-  }
-
-  // Initialize the default speaker
-  if (shared_->audio_device()->SetPlayoutDevice(
-          WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE) != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to set the default output device";
-  }
-  if (shared_->audio_device()->InitSpeaker() != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to initialize the speaker";
-  }
-
-  // Initialize the default microphone
-  if (shared_->audio_device()->SetRecordingDevice(
-          WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE) != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to set the default input device";
-  }
-  if (shared_->audio_device()->InitMicrophone() != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to initialize the microphone";
-  }
-
-  // Set number of channels
-  if (shared_->audio_device()->StereoPlayoutIsAvailable(&available) != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to query stereo playout mode";
-  }
-  if (shared_->audio_device()->SetStereoPlayout(available) != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to set mono/stereo playout mode";
-  }
-
-  // TODO(andrew): These functions don't tell us whether stereo recording
-  // is truly available. We simply set the AudioProcessing input to stereo
-  // here, because we have to wait until receiving the first frame to
-  // determine the actual number of channels anyway.
-  //
-  // These functions may be changed; tracked here:
-  // http://code.google.com/p/webrtc/issues/detail?id=204
-  shared_->audio_device()->StereoRecordingIsAvailable(&available);
-  if (shared_->audio_device()->SetStereoRecording(available) != 0) {
-    RTC_LOG(LS_ERROR) << "Init() failed to set mono/stereo recording mode";
-  }
+  shared_->set_audio_device(audio_device);
   shared_->set_audio_processing(audio_processing);

   // Configure AudioProcessing components.

@@ -518,23 +444,7 @@ int32_t VoEBaseImpl::TerminateInternal() {
     shared_->process_thread()->Stop();
   }

-  if (shared_->audio_device()) {
-    if (shared_->audio_device()->StopPlayout() != 0) {
-      RTC_LOG(LS_ERROR) << "TerminateInternal() failed to stop playout";
-    }
-    if (shared_->audio_device()->StopRecording() != 0) {
-      RTC_LOG(LS_ERROR) << "TerminateInternal() failed to stop recording";
-    }
-    if (shared_->audio_device()->RegisterAudioCallback(nullptr) != 0) {
-      RTC_LOG(LS_ERROR) << "TerminateInternal() failed to de-register audio "
-                           "callback for the ADM";
-    }
-    if (shared_->audio_device()->Terminate() != 0) {
-      RTC_LOG(LS_ERROR) << "TerminateInternal() failed to terminate the ADM";
-    }
-    shared_->set_audio_device(nullptr);
-  }
+  shared_->set_audio_device(nullptr);
   shared_->set_audio_processing(nullptr);

   return 0;

View File

@@ -25,12 +25,9 @@ class VoEBaseImpl : public VoEBase,
                     public AudioTransport {
  public:
   int Init(
-      AudioDeviceModule* external_adm,
+      AudioDeviceModule* audio_device,
       AudioProcessing* audio_processing,
       const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) override;
-  AudioDeviceModule* audio_device_module() override {
-    return shared_->audio_device();
-  }
   voe::TransmitMixer* transmit_mixer() override {
     return shared_->transmit_mixer();
   }

View File

@@ -47,23 +47,4 @@ enum { kVoiceEngineMaxMinPlayoutDelayMs = 10000 };
 }  // namespace webrtc

-namespace webrtc {
-
-inline int VoEId(int veId, int chId) {
-  if (chId == -1) {
-    const int dummyChannel(99);
-    return (int)((veId << 16) + dummyChannel);
-  }
-  return (int)((veId << 16) + chId);
-}
-
-}  // namespace webrtc
-
-#if defined(_WIN32)
-#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \
-  AudioDeviceModule::kDefaultCommunicationDevice
-#else
-#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
-#endif  // #if (defined(_WIN32)
-
 #endif  // VOICE_ENGINE_VOICE_ENGINE_DEFINES_H_