Use AudioProcessingBuilder everywhere AudioProcessing is created.
The AudioProcessingBuilder was recently introduced in https://webrtc-review.googlesource.com/c/src/+/34651 to make it easier to create APM instances. This CL replaces all calls to the old Create methods with the new AudioProcessingBuilder.

Bug: webrtc:8668
Change-Id: Ibb5f0fc0dbcc85fcf3355b01bec916f20fe0eb67
Reviewed-on: https://webrtc-review.googlesource.com/36082
Commit-Queue: Ivo Creusen <ivoc@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#21534}
This commit is contained in:
parent 8bb8308235
commit 62337e59dd
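For readers following the migration, here is a minimal sketch of the before/after pattern this commit applies. The builder and its chained setters (SetEchoControlFactory, SetRenderPreProcessing, SetCapturePostProcessing, SetNonlinearBeamformer) are taken from the diff below; the include path and the surrounding helper function are illustrative assumptions, not code from this CL.

#include <memory>
#include <utility>

#include "modules/audio_processing/include/audio_processing.h"  // assumed header path

// Hypothetical helper showing the old and new APM creation styles side by side.
rtc::scoped_refptr<webrtc::AudioProcessing> CreateApmForDemo(
    std::unique_ptr<webrtc::EchoControlFactory> echo_control_factory) {
  webrtc::Config config;

  // Old style (removed by this CL): an overloaded static factory with
  // positional, mostly-null injection arguments.
  //   rtc::scoped_refptr<webrtc::AudioProcessing> apm =
  //       webrtc::AudioProcessing::Create(config, nullptr, nullptr,
  //                                       std::move(echo_control_factory),
  //                                       nullptr);

  // New style: only the injected components that matter are named explicitly
  // on the builder, then Create() is called with an optional legacy config.
  rtc::scoped_refptr<webrtc::AudioProcessing> apm =
      webrtc::AudioProcessingBuilder()
          .SetEchoControlFactory(std::move(echo_control_factory))
          .Create(config);
  return apm;
}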
@@ -193,7 +193,8 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec,
   AudioState::Config send_audio_state_config;
   send_audio_state_config.voice_engine = voice_engine;
   send_audio_state_config.audio_mixer = AudioMixerImpl::Create();
-  send_audio_state_config.audio_processing = AudioProcessing::Create();
+  send_audio_state_config.audio_processing =
+      AudioProcessingBuilder().Create();
   send_audio_state_config.audio_device_module = fake_audio_device;
   Call::Config sender_config(event_log_.get());

@@ -25,7 +25,8 @@ struct TestHelper {
     // This replicates the conditions from voe_auto_test.
     Config config;
     config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
-    apm_ = rtc::scoped_refptr<AudioProcessing>(AudioProcessing::Create(config));
+    apm_ = rtc::scoped_refptr<AudioProcessing>(
+        AudioProcessingBuilder().Create(config));
     apm_helpers::Init(apm());
   }

@@ -32,7 +32,7 @@ class WebRtcMediaEngineNullVideo
             audio_encoder_factory,
             audio_decoder_factory,
             nullptr,
-            webrtc::AudioProcessing::Create()),
+            webrtc::AudioProcessingBuilder().Create()),
         std::forward_as_tuple()) {}
 };

@@ -66,9 +66,10 @@ MediaEngineInterface* WebRtcMediaEngineFactory::Create(
     audio_decoder_factory,
     WebRtcVideoEncoderFactory* video_encoder_factory,
     WebRtcVideoDecoderFactory* video_decoder_factory) {
-  return CreateWebRtcMediaEngine(
-      adm, audio_encoder_factory, audio_decoder_factory, video_encoder_factory,
-      video_decoder_factory, nullptr, webrtc::AudioProcessing::Create());
+  return CreateWebRtcMediaEngine(adm, audio_encoder_factory,
+                                 audio_decoder_factory, video_encoder_factory,
+                                 video_decoder_factory, nullptr,
+                                 webrtc::AudioProcessingBuilder().Create());
 }

 MediaEngineInterface* WebRtcMediaEngineFactory::Create(

@@ -3336,7 +3336,7 @@ TEST(WebRtcVoiceEngineTest, StartupShutdown) {
   // we never want it to create a decoder at this stage.
   testing::NiceMock<webrtc::test::MockAudioDeviceModule> adm;
   rtc::scoped_refptr<webrtc::AudioProcessing> apm =
-      webrtc::AudioProcessing::Create();
+      webrtc::AudioProcessingBuilder().Create();
   cricket::WebRtcVoiceEngine engine(
       &adm, webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);

@@ -3359,7 +3359,7 @@ TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) {
       .WillRepeatedly(Return(rtc::RefCountReleaseStatus::kDroppedLastRef));
   {
     rtc::scoped_refptr<webrtc::AudioProcessing> apm =
-        webrtc::AudioProcessing::Create();
+        webrtc::AudioProcessingBuilder().Create();
     cricket::WebRtcVoiceEngine engine(
         &adm, webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
         webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);

@@ -3380,7 +3380,7 @@ TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) {
   // type assignments checked here? It shouldn't really matter.
   testing::NiceMock<webrtc::test::MockAudioDeviceModule> adm;
   rtc::scoped_refptr<webrtc::AudioProcessing> apm =
-      webrtc::AudioProcessing::Create();
+      webrtc::AudioProcessingBuilder().Create();
   cricket::WebRtcVoiceEngine engine(
       &adm, webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);

@@ -3425,7 +3425,7 @@ TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) {
 TEST(WebRtcVoiceEngineTest, Has32Channels) {
   testing::NiceMock<webrtc::test::MockAudioDeviceModule> adm;
   rtc::scoped_refptr<webrtc::AudioProcessing> apm =
-      webrtc::AudioProcessing::Create();
+      webrtc::AudioProcessingBuilder().Create();
   cricket::WebRtcVoiceEngine engine(
       &adm, webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);

@@ -3463,7 +3463,7 @@ TEST(WebRtcVoiceEngineTest, SetRecvCodecs) {
   // I think it will become clear once audio decoder injection is completed.
   testing::NiceMock<webrtc::test::MockAudioDeviceModule> adm;
   rtc::scoped_refptr<webrtc::AudioProcessing> apm =
-      webrtc::AudioProcessing::Create();
+      webrtc::AudioProcessingBuilder().Create();
   cricket::WebRtcVoiceEngine engine(
       &adm, webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::CreateBuiltinAudioDecoderFactory(), nullptr, apm);

@@ -3505,7 +3505,7 @@ TEST(WebRtcVoiceEngineTest, CollectRecvCodecs) {
   testing::NiceMock<webrtc::test::MockAudioDeviceModule> adm;

   rtc::scoped_refptr<webrtc::AudioProcessing> apm =
-      webrtc::AudioProcessing::Create();
+      webrtc::AudioProcessingBuilder().Create();
   cricket::WebRtcVoiceEngine engine(&adm, unused_encoder_factory,
                                     mock_decoder_factory, nullptr, apm);
   engine.Init();

@@ -138,7 +138,8 @@ std::unique_ptr<AudioProcessing> CreateLimiter() {
   Config config;
   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));

-  std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config));
+  std::unique_ptr<AudioProcessing> limiter(
+      AudioProcessingBuilder().Create(config));
   RTC_DCHECK(limiter);

   webrtc::AudioProcessing::Config apm_config;

@@ -24,7 +24,7 @@ namespace {
 std::unique_ptr<webrtc::AudioProcessing> CreateAudioProcessing() {
   webrtc::Config config;
   std::unique_ptr<webrtc::AudioProcessing> apm(
-      webrtc::AudioProcessing::Create(config));
+      webrtc::AudioProcessingBuilder().Create(config));
   RTC_DCHECK(apm);
   return apm;
 }

@@ -496,7 +496,7 @@ AudioProcessingImplLockTest::AudioProcessingImplLockTest()
       render_thread_(RenderProcessorThreadFunc, this, "render"),
       capture_thread_(CaptureProcessorThreadFunc, this, "capture"),
       stats_thread_(StatsProcessorThreadFunc, this, "stats"),
-      apm_(AudioProcessingImpl::Create()),
+      apm_(AudioProcessingBuilder().Create()),
       render_thread_state_(kMaxFrameSize,
                            &rand_gen_,
                            &render_call_event_,

@@ -559,7 +559,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
     int num_capture_channels = 1;
     switch (simulation_config_.simulation_settings) {
       case SettingsType::kDefaultApmMobile: {
-        apm_.reset(AudioProcessingImpl::Create());
+        apm_.reset(AudioProcessingBuilder().Create());
         ASSERT_TRUE(!!apm_);
         set_default_mobile_apm_runtime_settings(apm_.get());
         break;

@@ -567,7 +567,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
       case SettingsType::kDefaultApmDesktop: {
         Config config;
         add_default_desktop_config(&config);
-        apm_.reset(AudioProcessingImpl::Create(config));
+        apm_.reset(AudioProcessingBuilder().Create(config));
         ASSERT_TRUE(!!apm_);
         set_default_desktop_apm_runtime_settings(apm_.get());
         apm_->SetExtraOptions(config);

@@ -577,7 +577,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
         Config config;
         add_beamformer_config(&config);
         add_default_desktop_config(&config);
-        apm_.reset(AudioProcessingImpl::Create(config));
+        apm_.reset(AudioProcessingBuilder().Create(config));
         ASSERT_TRUE(!!apm_);
         set_default_desktop_apm_runtime_settings(apm_.get());
         apm_->SetExtraOptions(config);

@@ -588,14 +588,14 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
         Config config;
         config.Set<Intelligibility>(new Intelligibility(true));
         add_default_desktop_config(&config);
-        apm_.reset(AudioProcessingImpl::Create(config));
+        apm_.reset(AudioProcessingBuilder().Create(config));
         ASSERT_TRUE(!!apm_);
         set_default_desktop_apm_runtime_settings(apm_.get());
         apm_->SetExtraOptions(config);
         break;
       }
       case SettingsType::kAllSubmodulesTurnedOff: {
-        apm_.reset(AudioProcessingImpl::Create());
+        apm_.reset(AudioProcessingBuilder().Create());
         ASSERT_TRUE(!!apm_);
         turn_off_default_apm_runtime_settings(apm_.get());
         break;

@@ -604,7 +604,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
         Config config;
         config.Set<ExtendedFilter>(new ExtendedFilter(true));
         config.Set<DelayAgnostic>(new DelayAgnostic(false));
-        apm_.reset(AudioProcessingImpl::Create(config));
+        apm_.reset(AudioProcessingBuilder().Create(config));
         ASSERT_TRUE(!!apm_);
         set_default_desktop_apm_runtime_settings(apm_.get());
         apm_->SetExtraOptions(config);

@@ -614,7 +614,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
         Config config;
         config.Set<ExtendedFilter>(new ExtendedFilter(false));
         config.Set<DelayAgnostic>(new DelayAgnostic(true));
-        apm_.reset(AudioProcessingImpl::Create(config));
+        apm_.reset(AudioProcessingBuilder().Create(config));
         ASSERT_TRUE(!!apm_);
         set_default_desktop_apm_runtime_settings(apm_.get());
         apm_->SetExtraOptions(config);

@@ -452,7 +452,7 @@ ApmTest::ApmTest()
       out_file_(NULL) {
   Config config;
   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
-  apm_.reset(AudioProcessing::Create(config));
+  apm_.reset(AudioProcessingBuilder().Create(config));
 }

 void ApmTest::SetUp() {

@@ -1318,7 +1318,10 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
   testing::NiceMock<MockNonlinearBeamformer>* beamformer =
       new testing::NiceMock<MockNonlinearBeamformer>(geometry, 1u);
   std::unique_ptr<AudioProcessing> apm(
-      AudioProcessing::Create(config, nullptr, nullptr, nullptr, beamformer));
+      AudioProcessingBuilder()
+          .SetNonlinearBeamformer(
+              std::unique_ptr<webrtc::NonlinearBeamformer>(beamformer))
+          .Create(config));
   EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
   ChannelBuffer<float> src_buf(kSamplesPerChannel, kNumInputChannels);
   ChannelBuffer<float> dest_buf(kSamplesPerChannel, kNumOutputChannels);

@@ -1582,7 +1585,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
   auto src_channels = &src[0];
   auto dest_channels = &dest[0];

-  apm_.reset(AudioProcessing::Create());
+  apm_.reset(AudioProcessingBuilder().Create());
   EXPECT_NOERR(apm_->ProcessStream(
       &src_channels, kSamples, sample_rate, LayoutFromChannels(1),
       sample_rate, LayoutFromChannels(1), &dest_channels));

@@ -1962,7 +1965,8 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {

   Config config;
   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
-  std::unique_ptr<AudioProcessing> fapm(AudioProcessing::Create(config));
+  std::unique_ptr<AudioProcessing> fapm(
+      AudioProcessingBuilder().Create(config));
   EnableAllComponents();
   EnableAllAPComponents(fapm.get());
   for (int i = 0; i < ref_data.test_size(); i++) {

@@ -2114,7 +2118,7 @@ TEST_F(ApmTest, Process) {
     config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
     config.Set<ExtendedFilter>(
         new ExtendedFilter(test->use_aec_extended_filter()));
-    apm_.reset(AudioProcessing::Create(config));
+    apm_.reset(AudioProcessingBuilder().Create(config));

     EnableAllComponents();

@@ -2329,7 +2333,7 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
       {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
   };

-  std::unique_ptr<AudioProcessing> ap(AudioProcessing::Create());
+  std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
   // Enable one component just to ensure some processing takes place.
   ap->noise_suppression()->Enable(true);
   for (size_t i = 0; i < arraysize(cf); ++i) {

@@ -2458,7 +2462,8 @@ class AudioProcessingTest
                              const std::string& output_file_prefix) {
     Config config;
     config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
-    std::unique_ptr<AudioProcessing> ap(AudioProcessing::Create(config));
+    std::unique_ptr<AudioProcessing> ap(
+        AudioProcessingBuilder().Create(config));
     EnableAllAPComponents(ap.get());

     ProcessingConfig processing_config = {

@@ -2935,8 +2940,10 @@ TEST(ApmConfiguration, EnablePreProcessing) {
       new testing::NiceMock<test::MockCustomProcessing>();
   auto mock_pre_processor =
       std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
-  rtc::scoped_refptr<AudioProcessing> apm = AudioProcessing::Create(
-      webrtc_config, nullptr, std::move(mock_pre_processor), nullptr, nullptr);
+  rtc::scoped_refptr<AudioProcessing> apm =
+      AudioProcessingBuilder()
+          .SetRenderPreProcessing(std::move(mock_pre_processor))
+          .Create(webrtc_config);

   AudioFrame audio;
   audio.num_channels_ = 1;

@@ -2982,7 +2989,8 @@ std::unique_ptr<AudioProcessing> CreateApm(bool use_AEC2) {
     old_config.Set<ExtendedFilter>(new ExtendedFilter(true));
     old_config.Set<DelayAgnostic>(new DelayAgnostic(true));
   }
-  std::unique_ptr<AudioProcessing> apm(AudioProcessing::Create(old_config));
+  std::unique_ptr<AudioProcessing> apm(
+      AudioProcessingBuilder().Create(old_config));
   if (!apm) {
     return apm;
   }

@@ -17,7 +17,7 @@
 namespace webrtc {

 TEST(EchoCancellationInternalTest, ExtendedFilter) {
-  std::unique_ptr<AudioProcessing> ap(AudioProcessing::Create());
+  std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
   EXPECT_TRUE(ap->echo_cancellation()->aec_core() == NULL);

   EXPECT_EQ(ap->kNoError, ap->echo_cancellation()->Enable(true));

@@ -47,7 +47,7 @@ TEST(EchoCancellationInternalTest, ExtendedFilter) {
 }

 TEST(EchoCancellationInternalTest, DelayAgnostic) {
-  std::unique_ptr<AudioProcessing> ap(AudioProcessing::Create());
+  std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
   EXPECT_TRUE(ap->echo_cancellation()->aec_core() == NULL);

   EXPECT_EQ(ap->kNoError, ap->echo_cancellation()->Enable(true));

@@ -112,7 +112,7 @@ struct DelayAgnostic {
 // microphone volume is set too low. The value is clamped to its operating range
 // [12, 255]. Here, 255 maps to 100%.
 //
-// Must be provided through AudioProcessing::Create(Confg&).
+// Must be provided through AudioProcessingBuilder().Create(config).
 #if defined(WEBRTC_CHROMIUM_BUILD)
 static const int kAgcStartupMinVolume = 85;
 #else

@@ -205,7 +205,7 @@ struct Intelligibility {
 // data.
 //
 // Usage example, omitting error checking:
-// AudioProcessing* apm = AudioProcessing::Create(0);
+// AudioProcessing* apm = AudioProcessingBuilder().Create();
 //
 // AudioProcessing::Config config;
 // config.level_controller.enabled = true;

@@ -86,7 +86,7 @@ void RunTogetherWithApm(const std::string& test_description,
   apm_config.residual_echo_detector.enabled = include_default_apm_processing;

   std::unique_ptr<AudioProcessing> apm;
-  apm.reset(AudioProcessing::Create(config));
+  apm.reset(AudioProcessingBuilder().Create(config));
   ASSERT_TRUE(apm.get());
   apm->ApplyConfig(apm_config);

@@ -348,8 +348,9 @@ void AudioProcessingSimulator::CreateAudioProcessor() {
     apm_config.residual_echo_detector.enabled = *settings_.use_ed;
   }

-  ap_.reset(AudioProcessing::Create(config, nullptr, nullptr,
-                                    std::move(echo_control_factory), nullptr));
+  ap_.reset(AudioProcessingBuilder()
+                .SetEchoControlFactory(std::move(echo_control_factory))
+                .Create(config));
   RTC_CHECK(ap_);

   ap_->ApplyConfig(apm_config);

@@ -193,7 +193,7 @@ void DebugDumpReplayer::MaybeRecreateApm(const audioproc::Config& msg) {
   // We only create APM once, since changes on these fields should not
   // happen in current implementation.
   if (!apm_.get()) {
-    apm_.reset(AudioProcessing::Create(config));
+    apm_.reset(AudioProcessingBuilder().Create(config));
   }
 }

@@ -139,15 +139,14 @@ DebugDumpGenerator::DebugDumpGenerator(const std::string& input_file_name,
       output_(new ChannelBuffer<float>(output_config_.num_frames(),
                                        output_config_.num_channels())),
       worker_queue_("debug_dump_generator_worker_queue"),
-      apm_(AudioProcessing::Create(
-          config,
-          nullptr,
-          nullptr,
-          (enable_aec3 ? std::unique_ptr<EchoControlFactory>(
-                             new EchoCanceller3Factory())
-                       : nullptr),
-          nullptr)),
-      dump_file_name_(dump_file_name) {}
+      dump_file_name_(dump_file_name) {
+  AudioProcessingBuilder apm_builder;
+  if (enable_aec3) {
+    apm_builder.SetEchoControlFactory(
+        std::unique_ptr<EchoControlFactory>(new EchoCanceller3Factory()));
+  }
+  apm_.reset(apm_builder.Create(config));
+}

 DebugDumpGenerator::DebugDumpGenerator(
     const Config& config,

@@ -562,7 +562,7 @@ OrtcFactory::CreateMediaEngine_w() {
   return std::unique_ptr<cricket::MediaEngineInterface>(
       cricket::WebRtcMediaEngineFactory::Create(
           adm_, audio_encoder_factory_, audio_decoder_factory_, nullptr,
-          nullptr, nullptr, webrtc::AudioProcessing::Create()));
+          nullptr, nullptr, webrtc::AudioProcessingBuilder().Create()));
 }

 }  // namespace webrtc

@@ -48,7 +48,7 @@ rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
     rtc::scoped_refptr<AudioProcessing> audio_processing) {
   rtc::scoped_refptr<AudioProcessing> audio_processing_use = audio_processing;
   if (!audio_processing_use) {
-    audio_processing_use = AudioProcessing::Create();
+    audio_processing_use = AudioProcessingBuilder().Create();
   }

   std::unique_ptr<cricket::MediaEngineInterface> media_engine(

@@ -79,7 +79,7 @@ rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
     rtc::scoped_refptr<AudioMixer> audio_mixer,
     rtc::scoped_refptr<AudioProcessing> audio_processing) {
   if (!audio_processing)
-    audio_processing = AudioProcessing::Create();
+    audio_processing = AudioProcessingBuilder().Create();

   std::unique_ptr<cricket::MediaEngineInterface> media_engine =
       cricket::WebRtcMediaEngineFactory::Create(

@@ -54,7 +54,7 @@ class PeerConnectionFactoryForJsepTest : public PeerConnectionFactory {
               nullptr,
               nullptr,
               nullptr,
-              AudioProcessing::Create())),
+              AudioProcessingBuilder().Create())),
           CreateCallFactory(),
           nullptr) {}

@@ -551,7 +551,7 @@ class PeerConnectionFactoryForTest : public webrtc::PeerConnectionFactory {
         cricket::WebRtcMediaEngineFactory::Create(
             FakeAudioCaptureModule::Create(), audio_encoder_factory,
             audio_decoder_factory, nullptr, nullptr, nullptr,
-            webrtc::AudioProcessing::Create()));
+            webrtc::AudioProcessingBuilder().Create()));

     std::unique_ptr<webrtc::CallFactoryInterface> call_factory =
         webrtc::CreateCallFactory();

@@ -26,7 +26,7 @@ rtc::scoped_refptr<AudioEncoderFactory> CreateAudioEncoderFactory() {
 }

 rtc::scoped_refptr<AudioProcessing> CreateAudioProcessing() {
-  return AudioProcessing::Create();
+  return AudioProcessingBuilder().Create();
 }

 }  // namespace jni

@@ -25,10 +25,9 @@ static jlong JNI_DefaultAudioProcessingFactory_CreateAudioProcessing(
   std::unique_ptr<CustomProcessing> post_processor(
       reinterpret_cast<CustomProcessing*>(native_post_processor));
   rtc::scoped_refptr<AudioProcessing> audio_processing =
-      AudioProcessing::Create(webrtc::Config(), std::move(post_processor),
-                              nullptr /* render_pre_processing */,
-                              nullptr /* echo_control_factory */,
-                              nullptr /* beamformer */);
+      AudioProcessingBuilder()
+          .SetCapturePostProcessing(std::move(post_processor))
+          .Create();
   return jlongFromPointer(audio_processing.release());
 }

@@ -65,8 +65,8 @@ void CallTest::RunBaseTest(BaseTest* test) {
   CreateFakeAudioDevices(test->CreateCapturer(), test->CreateRenderer());
   test->OnFakeAudioDevicesCreated(fake_send_audio_device_.get(),
                                   fake_recv_audio_device_.get());
-  apm_send_ = AudioProcessing::Create();
-  apm_recv_ = AudioProcessing::Create();
+  apm_send_ = AudioProcessingBuilder().Create();
+  apm_recv_ = AudioProcessingBuilder().Create();
   CreateVoiceEngines();
   AudioState::Config audio_state_config;
   audio_state_config.voice_engine = voe_send_.voice_engine;

@@ -69,8 +69,10 @@ std::unique_ptr<AudioProcessing> CreateAPM(const uint8_t** data,
   config.Set<DelayAgnostic>(new DelayAgnostic(*da));
   config.Set<Intelligibility>(new Intelligibility(*ie));

-  std::unique_ptr<AudioProcessing> apm(AudioProcessing::Create(
-      config, nullptr, nullptr, std::move(echo_control_factory), nullptr));
+  std::unique_ptr<AudioProcessing> apm(
+      AudioProcessingBuilder()
+          .SetEchoControlFactory(std::move(echo_control_factory))
+          .Create(config));

   webrtc::AudioProcessing::Config apm_config;
   apm_config.residual_echo_detector.enabled = *red;

@@ -2096,7 +2096,7 @@ void VideoQualityTest::RunWithRenderers(const Params& params) {
   AudioState::Config audio_state_config;
   audio_state_config.voice_engine = voe.voice_engine;
   audio_state_config.audio_mixer = AudioMixerImpl::Create();
-  audio_state_config.audio_processing = AudioProcessing::Create();
+  audio_state_config.audio_processing = AudioProcessingBuilder().Create();
   audio_state_config.audio_device_module = fake_audio_device;
   call_config.audio_state = AudioState::Create(audio_state_config);
   fake_audio_device->RegisterAudioCallback(