From a9bba047b7bb5cfe9aa7b52620672f36ae8fdb74 Mon Sep 17 00:00:00 2001
From: Peter Hanspers
Date: Tue, 30 May 2023 13:43:41 +0200
Subject: [PATCH] Updating AsyncAudioProcessing API, part 1.

Add an API to pass AudioFrameProcessor as a unique_ptr.

Bug: webrtc:15111
Change-Id: I4cefa35399c05c6e81c496e0b0387b95809bd8f8
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/301984
Reviewed-by: Olga Sharonova
Reviewed-by: Markus Handell
Reviewed-by: Henrik Andreassson
Commit-Queue: Peter Hanspers
Cr-Commit-Position: refs/heads/main@{#40187}
---
 api/create_peerconnection_factory.cc         | 48 ++++++++++++++++-
 api/create_peerconnection_factory.h          | 19 ++++++-
 .../null_webrtc_video_engine_unittest.cc     |  2 +-
 media/engine/webrtc_media_engine.cc          |  3 +-
 media/engine/webrtc_media_engine.h           |  3 ++
 media/engine/webrtc_voice_engine.cc          | 11 +++-
 media/engine/webrtc_voice_engine.h           |  6 +++
 media/engine/webrtc_voice_engine_unittest.cc | 19 +++----
 .../async_audio_processing.cc                | 54 ++++++++++++++++---
 .../async_audio_processing.h                 | 33 ++++++++++++
 10 files changed, 177 insertions(+), 21 deletions(-)

diff --git a/api/create_peerconnection_factory.cc b/api/create_peerconnection_factory.cc
index f9cc7ad3e2..b7f9eb7f30 100644
--- a/api/create_peerconnection_factory.cc
+++ b/api/create_peerconnection_factory.cc
@@ -39,6 +39,7 @@ rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
     rtc::scoped_refptr<AudioMixer> audio_mixer,
     rtc::scoped_refptr<AudioProcessing> audio_processing,
     AudioFrameProcessor* audio_frame_processor,
+    std::unique_ptr<AudioFrameProcessor> owned_audio_frame_processor,
     std::unique_ptr<FieldTrialsView> field_trials) {
   if (!field_trials) {
     field_trials = std::make_unique<FieldTrialBasedConfig>();
   }
@@ -64,7 +65,12 @@ rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
   media_dependencies.adm = std::move(default_adm);
   media_dependencies.audio_encoder_factory = std::move(audio_encoder_factory);
   media_dependencies.audio_decoder_factory = std::move(audio_decoder_factory);
-  media_dependencies.audio_frame_processor = audio_frame_processor;
+  if (audio_frame_processor) {
+    media_dependencies.audio_frame_processor = audio_frame_processor;
+  } else if (owned_audio_frame_processor) {
+    media_dependencies.owned_audio_frame_processor =
+        std::move(owned_audio_frame_processor);
+  }
   if (audio_processing) {
     media_dependencies.audio_processing = std::move(audio_processing);
   } else {
@@ -80,4 +86,44 @@ rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
   return CreateModularPeerConnectionFactory(std::move(dependencies));
 }
 
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    AudioFrameProcessor* audio_frame_processor) {
+  return CreatePeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, default_adm,
+      audio_encoder_factory, audio_decoder_factory,
+      std::move(video_encoder_factory), std::move(video_decoder_factory),
+      audio_mixer, audio_processing, audio_frame_processor, nullptr, nullptr);
+}
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    std::unique_ptr<AudioFrameProcessor> owned_audio_frame_processor,
+    std::unique_ptr<FieldTrialsView> field_trials) {
+  return CreatePeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, default_adm,
+      audio_encoder_factory, audio_decoder_factory,
+      std::move(video_encoder_factory), std::move(video_decoder_factory),
+      audio_mixer, audio_processing, nullptr,
+      std::move(owned_audio_frame_processor), std::move(field_trials));
+}
+
 }  // namespace webrtc
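For illustration (not part of this change), a rough sketch of how an embedder
could hand a processor to the new overload. PassthroughProcessor is a
hypothetical implementation; the include paths and the exact
AudioFrameProcessor virtuals (SetSink/Process) are assumed from how the
interface is used elsewhere in this patch, and the threads, codec factories
and APM are taken as already set up.

  #include <memory>
  #include <utility>

  #include "api/audio/audio_frame_processor.h"    // assumed path
  #include "api/create_peerconnection_factory.h"  // codec/APM includes omitted

  // Hypothetical processor: hands every frame straight back to the sink.
  class PassthroughProcessor : public webrtc::AudioFrameProcessor {
   public:
    void Process(std::unique_ptr<webrtc::AudioFrame> frame) override {
      if (sink_)
        sink_(std::move(frame));
    }
    void SetSink(OnAudioFrameCallback sink) override {
      sink_ = std::move(sink);
    }

   private:
    OnAudioFrameCallback sink_;
  };

  rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> BuildFactory(
      rtc::Thread* network_thread,
      rtc::Thread* worker_thread,
      rtc::Thread* signaling_thread,
      rtc::scoped_refptr<webrtc::AudioDeviceModule> adm,
      std::unique_ptr<webrtc::VideoEncoderFactory> video_encoder_factory,
      std::unique_ptr<webrtc::VideoDecoderFactory> video_decoder_factory) {
    // The factory takes ownership of the processor; the caller no longer has
    // to keep it alive for the lifetime of the factory.
    return webrtc::CreatePeerConnectionFactory(
        network_thread, worker_thread, signaling_thread, adm,
        webrtc::CreateBuiltinAudioEncoderFactory(),
        webrtc::CreateBuiltinAudioDecoderFactory(),
        std::move(video_encoder_factory), std::move(video_decoder_factory),
        /*audio_mixer=*/nullptr, webrtc::AudioProcessingBuilder().Create(),
        std::make_unique<PassthroughProcessor>(), /*field_trials=*/nullptr);
  }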
diff --git a/api/create_peerconnection_factory.h b/api/create_peerconnection_factory.h
index efebc5f3ea..f8f52a0869 100644
--- a/api/create_peerconnection_factory.h
+++ b/api/create_peerconnection_factory.h
@@ -37,6 +37,9 @@ class AudioProcessing;
 // Create a new instance of PeerConnectionFactoryInterface with optional video
 // codec factories. These video factories represent all video codecs, i.e. no
 // extra internal video codecs will be added.
+// TODO(bugs.webrtc.org/15111):
+// Remove the method with the raw AudioFrameProcessor pointer in the
+// follow-up.
 RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
 CreatePeerConnectionFactory(
     rtc::Thread* network_thread,
@@ -49,7 +52,21 @@ CreatePeerConnectionFactory(
     std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
     rtc::scoped_refptr<AudioMixer> audio_mixer,
     rtc::scoped_refptr<AudioProcessing> audio_processing,
-    AudioFrameProcessor* audio_frame_processor = nullptr,
+    AudioFrameProcessor* audio_frame_processor = nullptr);
+
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    std::unique_ptr<AudioFrameProcessor> owned_audio_frame_processor,
     std::unique_ptr<FieldTrialsView> field_trials = nullptr);
 
 }  // namespace webrtc
diff --git a/media/engine/null_webrtc_video_engine_unittest.cc b/media/engine/null_webrtc_video_engine_unittest.cc
index 9515d44be9..31c442d53d 100644
--- a/media/engine/null_webrtc_video_engine_unittest.cc
+++ b/media/engine/null_webrtc_video_engine_unittest.cc
@@ -37,7 +37,7 @@ TEST(NullWebRtcVideoEngineTest, CheckInterface) {
       task_queue_factory.get(), adm.get(),
       webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr,
-      webrtc::AudioProcessingBuilder().Create(), nullptr, trials);
+      webrtc::AudioProcessingBuilder().Create(), nullptr, nullptr, trials);
 
   CompositeMediaEngine engine(std::move(audio_engine),
                               std::make_unique<NullWebRtcVideoEngine>());
diff --git a/media/engine/webrtc_media_engine.cc b/media/engine/webrtc_media_engine.cc
index 514e228780..99d7dd2704 100644
--- a/media/engine/webrtc_media_engine.cc
+++ b/media/engine/webrtc_media_engine.cc
@@ -46,7 +46,8 @@ std::unique_ptr<MediaEngineInterface> CreateMediaEngine(
       std::move(dependencies.audio_decoder_factory),
       std::move(dependencies.audio_mixer),
       std::move(dependencies.audio_processing),
-      dependencies.audio_frame_processor, trials);
+      dependencies.audio_frame_processor,
+      std::move(dependencies.owned_audio_frame_processor), trials);
 #ifdef HAVE_WEBRTC_VIDEO
   auto video_engine = std::make_unique<WebRtcVideoEngine>(
       std::move(dependencies.video_encoder_factory),
diff --git a/media/engine/webrtc_media_engine.h b/media/engine/webrtc_media_engine.h
index e65824bd83..0f6dce35b5 100644
--- a/media/engine/webrtc_media_engine.h
+++ b/media/engine/webrtc_media_engine.h
@@ -49,7 +49,10 @@ struct MediaEngineDependencies {
   rtc::scoped_refptr<webrtc::AudioDecoderFactory> audio_decoder_factory;
   rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer;
   rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing;
+  // TODO(bugs.webrtc.org/15111):
+  // Remove the raw AudioFrameProcessor pointer in the follow-up.
   webrtc::AudioFrameProcessor* audio_frame_processor = nullptr;
+  std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor;
 
   std::unique_ptr<webrtc::VideoEncoderFactory> video_encoder_factory;
   std::unique_ptr<webrtc::VideoDecoderFactory> video_decoder_factory;
diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc
index be1f3a778a..a763ddb107 100644
--- a/media/engine/webrtc_voice_engine.cc
+++ b/media/engine/webrtc_voice_engine.cc
@@ -313,7 +313,10 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(
     const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
     rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer,
    rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing,
+    // TODO(bugs.webrtc.org/15111):
+    // Remove the raw AudioFrameProcessor pointer in the follow-up.
     webrtc::AudioFrameProcessor* audio_frame_processor,
+    std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor,
     const webrtc::FieldTrialsView& trials)
     : task_queue_factory_(task_queue_factory),
       adm_(adm),
@@ -322,6 +325,7 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(
       audio_mixer_(audio_mixer),
       apm_(audio_processing),
       audio_frame_processor_(audio_frame_processor),
+      owned_audio_frame_processor_(std::move(owned_audio_frame_processor)),
       minimized_remsampling_on_mobile_trial_enabled_(
           IsEnabled(trials, "WebRTC-Audio-MinimizeResamplingOnMobile")) {
   RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
@@ -387,10 +391,15 @@ void WebRtcVoiceEngine::Init() {
   }
   config.audio_processing = apm_;
   config.audio_device_module = adm_;
-  if (audio_frame_processor_)
+  if (audio_frame_processor_) {
     config.async_audio_processing_factory =
         rtc::make_ref_counted<webrtc::AsyncAudioProcessing::Factory>(
            *audio_frame_processor_, *task_queue_factory_);
+  } else if (owned_audio_frame_processor_) {
+    config.async_audio_processing_factory =
+        rtc::make_ref_counted<webrtc::AsyncAudioProcessing::Factory>(
+            std::move(owned_audio_frame_processor_), *task_queue_factory_);
+  }
 
   audio_state_ = webrtc::AudioState::Create(config);
 }
diff --git a/media/engine/webrtc_voice_engine.h b/media/engine/webrtc_voice_engine.h
index 48fb95a3e7..e73c24c5f5 100644
--- a/media/engine/webrtc_voice_engine.h
+++ b/media/engine/webrtc_voice_engine.h
@@ -89,7 +89,10 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {
       const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
       rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer,
       rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing,
+      // TODO(bugs.webrtc.org/15111):
+      // Remove the raw AudioFrameProcessor pointer in the follow-up.
      webrtc::AudioFrameProcessor* audio_frame_processor,
+      std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor,
       const webrtc::FieldTrialsView& trials);
 
   WebRtcVoiceEngine() = delete;
@@ -156,7 +159,10 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {
   // The audio processing module.
   rtc::scoped_refptr<webrtc::AudioProcessing> apm_;
   // Asynchronous audio processing.
+  // TODO(bugs.webrtc.org/15111):
+  // Remove the raw AudioFrameProcessor pointer in the follow-up.
   webrtc::AudioFrameProcessor* const audio_frame_processor_;
+  std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor_;
   // The primary instance of WebRtc VoiceEngine.
   rtc::scoped_refptr<webrtc::AudioState> audio_state_;
   std::vector<AudioCodec> send_codecs_;
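Embedders that build the media engine directly via
CreateModularPeerConnectionFactory get the same ownership transfer through the
new MediaEngineDependencies field. A rough sketch, reusing the hypothetical
PassthroughProcessor from the earlier note; the remaining dependency fields are
assumed to be populated as before and are abridged here:

  cricket::MediaEngineDependencies media_deps;
  media_deps.task_queue_factory = task_queue_factory.get();
  media_deps.adm = adm;
  media_deps.audio_encoder_factory = webrtc::CreateBuiltinAudioEncoderFactory();
  media_deps.audio_decoder_factory = webrtc::CreateBuiltinAudioDecoderFactory();
  media_deps.audio_processing = webrtc::AudioProcessingBuilder().Create();
  // New in this change: the voice engine takes ownership of the processor.
  media_deps.owned_audio_frame_processor =
      std::make_unique<PassthroughProcessor>();
  // ... video factories, field trials, etc. as before ...
  std::unique_ptr<cricket::MediaEngineInterface> media_engine =
      cricket::CreateMediaEngine(std::move(media_deps));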
diff --git a/media/engine/webrtc_voice_engine_unittest.cc b/media/engine/webrtc_voice_engine_unittest.cc
index d151715c3a..e99311164e 100644
--- a/media/engine/webrtc_voice_engine_unittest.cc
+++ b/media/engine/webrtc_voice_engine_unittest.cc
@@ -167,7 +167,7 @@ TEST(WebRtcVoiceEngineTestStubLibrary, StartupShutdown) {
         task_queue_factory.get(), adm.get(),
         webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
         webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
-        nullptr, trials);
+        nullptr, nullptr, trials);
     engine.Init();
   }
 }
@@ -213,7 +213,7 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam<bool> {
     auto decoder_factory = webrtc::CreateBuiltinAudioDecoderFactory();
     engine_.reset(new cricket::WebRtcVoiceEngine(
         task_queue_factory_.get(), adm_.get(), encoder_factory, decoder_factory,
-        nullptr, apm_, nullptr, field_trials_));
+        nullptr, apm_, nullptr, nullptr, field_trials_));
     engine_->Init();
     send_parameters_.codecs.push_back(kPcmuCodec);
     recv_parameters_.codecs.push_back(kPcmuCodec);
@@ -3641,7 +3641,7 @@ TEST(WebRtcVoiceEngineTest, StartupShutdown) {
       task_queue_factory.get(), adm.get(),
       webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
-      nullptr, field_trials);
+      nullptr, nullptr, field_trials);
   engine.Init();
   webrtc::RtcEventLogNull event_log;
   webrtc::Call::Config call_config(&event_log);
@@ -3673,7 +3673,7 @@ TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) {
       task_queue_factory.get(), adm.get(),
       webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
-      nullptr, field_trials);
+      nullptr, nullptr, field_trials);
   engine.Init();
   webrtc::RtcEventLogNull event_log;
   webrtc::Call::Config call_config(&event_log);
@@ -3709,7 +3709,7 @@ TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) {
       task_queue_factory.get(), adm.get(),
       webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
-      nullptr, field_trials);
+      nullptr, nullptr, field_trials);
   engine.Init();
   for (const cricket::AudioCodec& codec : engine.send_codecs()) {
     auto is_codec = [&codec](const char* name, int clockrate = 0) {
@@ -3759,7 +3759,7 @@ TEST(WebRtcVoiceEngineTest, Has32Channels) {
       task_queue_factory.get(), adm.get(),
       webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
-      nullptr, field_trials);
+      nullptr, nullptr, field_trials);
   engine.Init();
   webrtc::RtcEventLogNull event_log;
   webrtc::Call::Config call_config(&event_log);
@@ -3810,7 +3810,7 @@ TEST(WebRtcVoiceEngineTest, SetRecvCodecs) {
       task_queue_factory.get(), adm.get(),
       webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
       webrtc::CreateBuiltinAudioDecoderFactory(), nullptr, apm, nullptr,
-      field_trials);
+      nullptr, field_trials);
   engine.Init();
   webrtc::RtcEventLogNull event_log;
   webrtc::Call::Config call_config(&event_log);
@@ -3838,7 +3838,7 @@ TEST(WebRtcVoiceEngineTest, SetRtpSendParametersMaxBitrate) {
   cricket::WebRtcVoiceEngine engine(task_queue_factory.get(), adm.get(),
                                     webrtc::CreateBuiltinAudioEncoderFactory(),
                                     webrtc::CreateBuiltinAudioDecoderFactory(),
-                                    nullptr, nullptr, nullptr, field_trials);
+                                    nullptr, nullptr, nullptr, nullptr,
+                                    field_trials);
   engine.Init();
   webrtc::RtcEventLogNull event_log;
   webrtc::Call::Config call_config(&event_log);
@@ -3914,7 +3915,7 @@ TEST(WebRtcVoiceEngineTest, CollectRecvCodecs) {
   webrtc::FieldTrialBasedConfig field_trials;
   cricket::WebRtcVoiceEngine engine(
       task_queue_factory.get(), adm.get(), unused_encoder_factory,
-      mock_decoder_factory, nullptr, apm, nullptr, field_trials);
+      mock_decoder_factory, nullptr, apm, nullptr, nullptr, field_trials);
   engine.Init();
   auto codecs = engine.recv_codecs();
   EXPECT_EQ(11u, codecs.size());
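The existing tests only thread the extra nullptr through; no test in this patch
exercises the owned path. If one were added it could mirror StartupShutdown
above — sketch only, with the fixture setup elided and the hypothetical
PassthroughProcessor from the first note:

  TEST(WebRtcVoiceEngineTest, StartupShutdownWithOwnedFrameProcessor) {
    // Same task_queue_factory/adm/apm/field_trials setup as StartupShutdown
    // above (elided here).
    cricket::WebRtcVoiceEngine engine(
        task_queue_factory.get(), adm.get(),
        webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
        webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
        /*audio_frame_processor=*/nullptr,
        std::make_unique<PassthroughProcessor>(), field_trials);
    engine.Init();
  }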
diff --git a/modules/async_audio_processing/async_audio_processing.cc b/modules/async_audio_processing/async_audio_processing.cc
index 9452f3bcf9..19c08dc3e5 100644
--- a/modules/async_audio_processing/async_audio_processing.cc
+++ b/modules/async_audio_processing/async_audio_processing.cc
@@ -24,16 +24,33 @@ AsyncAudioProcessing::Factory::Factory(AudioFrameProcessor& frame_processor,
     : frame_processor_(frame_processor),
       task_queue_factory_(task_queue_factory) {}
 
+AsyncAudioProcessing::Factory::Factory(
+    std::unique_ptr<AudioFrameProcessor> frame_processor,
+    TaskQueueFactory& task_queue_factory)
+    : frame_processor_(*frame_processor),
+      owned_frame_processor_(std::move(frame_processor)),
+      task_queue_factory_(task_queue_factory) {}
+
 std::unique_ptr<AsyncAudioProcessing>
 AsyncAudioProcessing::Factory::CreateAsyncAudioProcessing(
     AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback) {
-  return std::make_unique<AsyncAudioProcessing>(
-      frame_processor_, task_queue_factory_,
-      std::move(on_frame_processed_callback));
+  if (owned_frame_processor_) {
+    return std::make_unique<AsyncAudioProcessing>(
+        std::move(owned_frame_processor_), task_queue_factory_,
+        std::move(on_frame_processed_callback));
+  } else {
+    return std::make_unique<AsyncAudioProcessing>(
+        frame_processor_, task_queue_factory_,
+        std::move(on_frame_processed_callback));
+  }
 }
 
 AsyncAudioProcessing::~AsyncAudioProcessing() {
-  frame_processor_.SetSink(nullptr);
+  if (owned_frame_processor_) {
+    owned_frame_processor_->SetSink(nullptr);
+  } else {
+    frame_processor_.SetSink(nullptr);
+  }
 }
 
 AsyncAudioProcessing::AsyncAudioProcessing(
@@ -52,10 +69,33 @@ AsyncAudioProcessing::AsyncAudioProcessing(
       });
 }
 
-void AsyncAudioProcessing::Process(std::unique_ptr<AudioFrame> frame) {
-  task_queue_.PostTask([this, frame = std::move(frame)]() mutable {
-    frame_processor_.Process(std::move(frame));
+AsyncAudioProcessing::AsyncAudioProcessing(
+    std::unique_ptr<AudioFrameProcessor> frame_processor,
+    TaskQueueFactory& task_queue_factory,
+    AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback)
+    : on_frame_processed_callback_(std::move(on_frame_processed_callback)),
+      frame_processor_(*frame_processor),
+      owned_frame_processor_(std::move(frame_processor)),
+      task_queue_(task_queue_factory.CreateTaskQueue(
+          "AsyncAudioProcessing",
+          TaskQueueFactory::Priority::NORMAL)) {
+  owned_frame_processor_->SetSink([this](std::unique_ptr<AudioFrame> frame) {
+    task_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+      on_frame_processed_callback_(std::move(frame));
+    });
   });
 }
 
+void AsyncAudioProcessing::Process(std::unique_ptr<AudioFrame> frame) {
+  if (owned_frame_processor_) {
+    task_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+      owned_frame_processor_->Process(std::move(frame));
+    });
+  } else {
+    task_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+      frame_processor_.Process(std::move(frame));
+    });
+  }
+}
+
 }  // namespace webrtc
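Putting the new pieces together, a rough sketch of the owned-processor flow
through AsyncAudioProcessing. Names not shown in this diff (PassthroughProcessor,
WireUpAsyncProcessing) are hypothetical, and the callback type is assumed to be
std::function-compatible, as its use above suggests:

  void WireUpAsyncProcessing(webrtc::TaskQueueFactory& task_queue_factory) {
    // The factory owns the processor from here on.
    auto factory =
        rtc::make_ref_counted<webrtc::AsyncAudioProcessing::Factory>(
            std::make_unique<PassthroughProcessor>(), task_queue_factory);

    // Frames handed to Process() are forwarded to the processor on the
    // internal "AsyncAudioProcessing" task queue; processed frames come back
    // through this callback, also posted on that queue.
    std::unique_ptr<webrtc::AsyncAudioProcessing> async_processing =
        factory->CreateAsyncAudioProcessing(
            [](std::unique_ptr<webrtc::AudioFrame> frame) {
              // Hand the processed frame further down the capture pipeline.
            });

    async_processing->Process(std::make_unique<webrtc::AudioFrame>());
  }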
diff --git a/modules/async_audio_processing/async_audio_processing.h b/modules/async_audio_processing/async_audio_processing.h
index bbd0f69b1b..f3ed96959b 100644
--- a/modules/async_audio_processing/async_audio_processing.h
+++ b/modules/async_audio_processing/async_audio_processing.h
@@ -38,12 +38,23 @@ class AsyncAudioProcessing final {
     ~Factory();
     Factory(AudioFrameProcessor& frame_processor,
             TaskQueueFactory& task_queue_factory);
+    Factory(std::unique_ptr<AudioFrameProcessor> frame_processor,
+            TaskQueueFactory& task_queue_factory);
 
     std::unique_ptr<AsyncAudioProcessing> CreateAsyncAudioProcessing(
         AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback);
 
    private:
+    // TODO(bugs.webrtc.org/15111):
+    // Remove 'AudioFrameProcessor& frame_processor_' in favour of
+    // std::unique_ptr<AudioFrameProcessor> in the follow-up.
+    // While transitioning this API from using AudioFrameProcessor& to using
+    // std::unique_ptr<AudioFrameProcessor>, we have two member variables, both
+    // referencing the same object. Throughout the lifetime of the Factory
+    // only one of the variables is used, depending on which constructor was
+    // called.
     AudioFrameProcessor& frame_processor_;
+    std::unique_ptr<AudioFrameProcessor> owned_frame_processor_;
     TaskQueueFactory& task_queue_factory_;
   };
 
@@ -57,17 +68,39 @@ class AsyncAudioProcessing final {
   // into `on_frame_processed_callback`, which is posted back onto
   // `task_queue_`. `task_queue_` is created using the provided
   // `task_queue_factory`.
+  // TODO(bugs.webrtc.org/15111):
+  // Remove this method in favour of the method taking the
+  // unique_ptr<AudioFrameProcessor> in the follow-up.
   AsyncAudioProcessing(
       AudioFrameProcessor& frame_processor,
       TaskQueueFactory& task_queue_factory,
       AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback);
 
+  // Creates AsyncAudioProcessing which will pass audio frames to
+  // `frame_processor` on `task_queue_` and reply with processed frames passed
+  // into `on_frame_processed_callback`, which is posted back onto
+  // `task_queue_`. `task_queue_` is created using the provided
+  // `task_queue_factory`.
+  AsyncAudioProcessing(
+      std::unique_ptr<AudioFrameProcessor> frame_processor,
+      TaskQueueFactory& task_queue_factory,
+      AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback);
+
   // Accepts `frame` for asynchronous processing. Thread-safe.
   void Process(std::unique_ptr<AudioFrame> frame);
 
  private:
   AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback_;
+  // TODO(bugs.webrtc.org/15111):
+  // Remove 'AudioFrameProcessor& frame_processor_' in favour of
+  // std::unique_ptr<AudioFrameProcessor> in the follow-up.
+  // While transitioning this API from using AudioFrameProcessor& to using
+  // std::unique_ptr<AudioFrameProcessor>, we have two member variables, both
+  // referencing the same object. Throughout the lifetime of the Factory
+  // only one of the variables is used, depending on which constructor was
+  // called.
   AudioFrameProcessor& frame_processor_;
+  std::unique_ptr<AudioFrameProcessor> owned_frame_processor_;
   rtc::TaskQueue task_queue_;
 };