From 97597c0f51aac5ba411882cdc133497c5311660e Mon Sep 17 00:00:00 2001 From: Harald Alvestrand Date: Thu, 4 Nov 2021 12:01:23 +0000 Subject: [PATCH] Remove usage of INFO alias for LS_INFO in log messages Bug: webrtc:13362 Change-Id: Ifda893861a036a85c045cd366f9eab33c62ebde0 Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/237221 Reviewed-by: Niels Moller Commit-Queue: Harald Alvestrand Cr-Commit-Position: refs/heads/main@{#35310} --- ...video_encoder_software_fallback_wrapper.cc | 4 +- api/voip/voip_engine_factory.cc | 2 +- audio/audio_state.cc | 4 +- .../resource_adaptation_processor.cc | 23 +- call/adaptation/video_stream_adapter.cc | 6 +- examples/peerconnection/client/conductor.cc | 60 ++--- .../peerconnection/client/linux/main_wnd.cc | 6 +- .../client/peer_connection_client.cc | 6 +- .../unityplugin/simple_peer_connection.cc | 14 +- .../encoder/rtc_event_log_encoder_legacy.cc | 2 +- .../usrsctp_transport_reliability_unittest.cc | 4 +- modules/audio_device/android/aaudio_player.cc | 30 +-- .../audio_device/android/aaudio_recorder.cc | 38 +-- .../audio_device/android/aaudio_wrapper.cc | 48 ++-- .../android/audio_device_template.h | 84 +++---- modules/audio_device/android/audio_manager.cc | 24 +- .../audio_device/android/audio_record_jni.cc | 38 +-- .../audio_device/android/audio_track_jni.cc | 30 +-- modules/audio_device/audio_device_buffer.cc | 91 +++---- modules/audio_device/audio_device_impl.cc | 236 +++++++++--------- modules/audio_device/fine_audio_buffer.cc | 16 +- .../include/audio_device_factory.cc | 4 +- .../audio_device/win/audio_device_core_win.cc | 36 +-- .../win/audio_device_module_win.cc | 88 +++---- .../audio_device/win/core_audio_base_win.cc | 117 ++++----- .../audio_device/win/core_audio_input_win.cc | 50 ++-- .../audio_device/win/core_audio_output_win.cc | 52 ++-- .../win/core_audio_utility_win.cc | 225 +++++++++-------- .../audio_device/win/core_audio_utility_win.h | 14 +- modules/utility/source/jvm_android.cc | 40 +-- .../codecs/h264/h264_encoder_impl.cc | 8 +- .../codecs/vp8/libvpx_vp8_encoder.cc | 4 +- .../codecs/vp9/libvpx_vp9_encoder.cc | 4 +- .../video_coding/utility/ivf_file_reader.cc | 8 +- p2p/base/fake_ice_transport.h | 2 +- p2p/base/p2p_transport_channel.cc | 26 +- p2p/base/port_unittest.cc | 2 +- p2p/client/basic_port_allocator.cc | 4 +- pc/jsep_transport.cc | 2 +- pc/jsep_transport_controller.cc | 2 +- rtc_base/logging.h | 2 + rtc_base/network_unittest.cc | 4 +- rtc_base/socket_adapters.cc | 2 +- rtc_base/win/scoped_com_initializer.cc | 6 +- rtc_base/win/windows_version_unittest.cc | 6 +- rtc_tools/converter/yuv_to_ivf_converter.cc | 10 +- .../audio_device_android.cc | 8 +- .../peer_connection_factory_unittest.cc | 6 +- .../src/jni/audio_device/aaudio_player.cc | 30 +-- .../src/jni/audio_device/aaudio_recorder.cc | 36 +-- .../src/jni/audio_device/aaudio_wrapper.cc | 48 ++-- .../jni/audio_device/audio_device_module.cc | 160 ++++++------ .../src/jni/audio_device/audio_record_jni.cc | 39 +-- .../src/jni/audio_device/audio_track_jni.cc | 30 +-- .../src/jni/audio_device/opensles_common.cc | 2 +- sdk/objc/native/api/audio_device_module.mm | 5 +- .../src/audio/audio_device_module_ios.mm | 190 +++++++------- test/network/cross_traffic_unittest.cc | 8 +- test/network/fake_network_socket_server.cc | 4 +- test/network/network_emulation.cc | 33 +-- .../video/default_video_quality_analyzer.cc | 88 +++---- .../video/example_video_quality_analyzer.cc | 6 +- test/pc/e2e/echo/echo_emulation.cc | 2 +- test/pc/e2e/media/media_helper.cc | 4 +- 
test/pc/e2e/peer_connection_quality_test.cc | 57 ++--- ..._based_network_quality_metrics_reporter.cc | 2 +- video/video_quality_test.cc | 4 +- video/video_source_sink_controller.cc | 3 +- video/video_stream_encoder.cc | 6 +- video/video_stream_encoder_unittest.cc | 10 +- 70 files changed, 1140 insertions(+), 1125 deletions(-) diff --git a/api/video_codecs/video_encoder_software_fallback_wrapper.cc b/api/video_codecs/video_encoder_software_fallback_wrapper.cc index 8202217880..72e08a704c 100644 --- a/api/video_codecs/video_encoder_software_fallback_wrapper.cc +++ b/api/video_codecs/video_encoder_software_fallback_wrapper.cc @@ -358,8 +358,8 @@ int32_t VideoEncoderSoftwareFallbackWrapper::EncodeWithMainEncoder( fallback_encoder_->GetEncoderInfo().supports_native_handle) { return fallback_encoder_->Encode(frame, frame_types); } else { - RTC_LOG(INFO) << "Fallback encoder does not support native handle - " - "converting frame to I420"; + RTC_LOG(LS_INFO) << "Fallback encoder does not support native handle - " + "converting frame to I420"; rtc::scoped_refptr src_buffer = frame.video_frame_buffer()->ToI420(); if (!src_buffer) { diff --git a/api/voip/voip_engine_factory.cc b/api/voip/voip_engine_factory.cc index 88f63f9c92..8da53cef74 100644 --- a/api/voip/voip_engine_factory.cc +++ b/api/voip/voip_engine_factory.cc @@ -24,7 +24,7 @@ std::unique_ptr CreateVoipEngine(VoipEngineConfig config) { RTC_CHECK(config.audio_device_module); if (!config.audio_processing) { - RTC_DLOG(INFO) << "No audio processing functionality provided."; + RTC_DLOG(LS_INFO) << "No audio processing functionality provided."; } return std::make_unique(std::move(config.encoder_factory), diff --git a/audio/audio_state.cc b/audio/audio_state.cc index 0e60f0372b..9e5b63b999 100644 --- a/audio/audio_state.cc +++ b/audio/audio_state.cc @@ -123,7 +123,7 @@ void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) { } void AudioState::SetPlayout(bool enabled) { - RTC_LOG(INFO) << "SetPlayout(" << enabled << ")"; + RTC_LOG(LS_INFO) << "SetPlayout(" << enabled << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); if (playout_enabled_ != enabled) { playout_enabled_ = enabled; @@ -140,7 +140,7 @@ void AudioState::SetPlayout(bool enabled) { } void AudioState::SetRecording(bool enabled) { - RTC_LOG(INFO) << "SetRecording(" << enabled << ")"; + RTC_LOG(LS_INFO) << "SetRecording(" << enabled << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); if (recording_enabled_ != enabled) { recording_enabled_ = enabled; diff --git a/call/adaptation/resource_adaptation_processor.cc b/call/adaptation/resource_adaptation_processor.cc index 3c06675a16..d95cd75a9d 100644 --- a/call/adaptation/resource_adaptation_processor.cc +++ b/call/adaptation/resource_adaptation_processor.cc @@ -129,7 +129,7 @@ void ResourceAdaptationProcessor::AddResource( resources_.push_back(resource); } resource->SetResourceListener(resource_listener_delegate_); - RTC_LOG(INFO) << "Registered resource \"" << resource->Name() << "\"."; + RTC_LOG(LS_INFO) << "Registered resource \"" << resource->Name() << "\"."; } std::vector> @@ -141,7 +141,7 @@ ResourceAdaptationProcessor::GetResources() const { void ResourceAdaptationProcessor::RemoveResource( rtc::scoped_refptr resource) { RTC_DCHECK(resource); - RTC_LOG(INFO) << "Removing resource \"" << resource->Name() << "\"."; + RTC_LOG(LS_INFO) << "Removing resource \"" << resource->Name() << "\"."; resource->SetResourceListener(nullptr); { MutexLock crit(&resources_lock_); @@ -188,10 +188,11 @@ void 
ResourceAdaptationProcessor::RemoveLimitationsImposedByResource( RTC_DCHECK_EQ(adapt_to.status(), Adaptation::Status::kValid); stream_adapter_->ApplyAdaptation(adapt_to, nullptr); - RTC_LOG(INFO) << "Most limited resource removed. Restoring restrictions to " - "next most limited restrictions: " - << most_limited.restrictions.ToString() << " with counters " - << most_limited.counters.ToString(); + RTC_LOG(LS_INFO) + << "Most limited resource removed. Restoring restrictions to " + "next most limited restrictions: " + << most_limited.restrictions.ToString() << " with counters " + << most_limited.counters.ToString(); } } @@ -204,8 +205,8 @@ void ResourceAdaptationProcessor::OnResourceUsageStateMeasured( { MutexLock crit(&resources_lock_); if (absl::c_find(resources_, resource) == resources_.end()) { - RTC_LOG(INFO) << "Ignoring signal from removed resource \"" - << resource->Name() << "\"."; + RTC_LOG(LS_INFO) << "Ignoring signal from removed resource \"" + << resource->Name() << "\"."; return; } } @@ -226,9 +227,9 @@ void ResourceAdaptationProcessor::OnResourceUsageStateMeasured( // successfully adapted since - don't log to avoid spam. return; } - RTC_LOG(INFO) << "Resource \"" << resource->Name() << "\" signalled " - << ResourceUsageStateToString(usage_state) << ". " - << result_and_message.message; + RTC_LOG(LS_INFO) << "Resource \"" << resource->Name() << "\" signalled " + << ResourceUsageStateToString(usage_state) << ". " + << result_and_message.message; if (result_and_message.result == MitigationResult::kAdaptationApplied) { previous_mitigation_results_.clear(); } else { diff --git a/call/adaptation/video_stream_adapter.cc b/call/adaptation/video_stream_adapter.cc index 49a4d6a1ce..ca625022b8 100644 --- a/call/adaptation/video_stream_adapter.cc +++ b/call/adaptation/video_stream_adapter.cc @@ -234,7 +234,7 @@ const VideoAdaptationCounters& VideoStreamAdapter::adaptation_counters() const { void VideoStreamAdapter::ClearRestrictions() { RTC_DCHECK_RUN_ON(&sequence_checker_); // Invalidate any previously returned Adaptation. 
- RTC_LOG(INFO) << "Resetting restrictions"; + RTC_LOG(LS_INFO) << "Resetting restrictions"; ++adaptation_validation_id_; current_restrictions_ = {VideoSourceRestrictions(), VideoAdaptationCounters()}; @@ -333,8 +333,8 @@ Adaptation VideoStreamAdapter::GetAdaptationUp( if (!constraint->IsAdaptationUpAllowed(input_state, current_restrictions_.restrictions, restrictions.restrictions)) { - RTC_LOG(INFO) << "Not adapting up because constraint \"" - << constraint->Name() << "\" disallowed it"; + RTC_LOG(LS_INFO) << "Not adapting up because constraint \"" + << constraint->Name() << "\" disallowed it"; step = Adaptation::Status::kRejectedByConstraint; } } diff --git a/examples/peerconnection/client/conductor.cc b/examples/peerconnection/client/conductor.cc index d3b1d21a41..5f213deeeb 100644 --- a/examples/peerconnection/client/conductor.cc +++ b/examples/peerconnection/client/conductor.cc @@ -61,10 +61,10 @@ class DummySetSessionDescriptionObserver static DummySetSessionDescriptionObserver* Create() { return new rtc::RefCountedObject(); } - virtual void OnSuccess() { RTC_LOG(INFO) << __FUNCTION__; } + virtual void OnSuccess() { RTC_LOG(LS_INFO) << __FUNCTION__; } virtual void OnFailure(webrtc::RTCError error) { - RTC_LOG(INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " - << error.message(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " + << error.message(); } }; @@ -221,23 +221,23 @@ void Conductor::OnAddTrack( rtc::scoped_refptr receiver, const std::vector>& streams) { - RTC_LOG(INFO) << __FUNCTION__ << " " << receiver->id(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); main_wnd_->QueueUIThreadCallback(NEW_TRACK_ADDED, receiver->track().release()); } void Conductor::OnRemoveTrack( rtc::scoped_refptr receiver) { - RTC_LOG(INFO) << __FUNCTION__ << " " << receiver->id(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); main_wnd_->QueueUIThreadCallback(TRACK_REMOVED, receiver->track().release()); } void Conductor::OnIceCandidate(const webrtc::IceCandidateInterface* candidate) { - RTC_LOG(INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); // For loopback test. To save some connecting delay. if (loopback_) { if (!peer_connection_->AddIceCandidate(candidate)) { - RTC_LOG(WARNING) << "Failed to apply the received candidate"; + RTC_LOG(LS_WARNING) << "Failed to apply the received candidate"; } return; } @@ -261,12 +261,12 @@ void Conductor::OnIceCandidate(const webrtc::IceCandidateInterface* candidate) { // void Conductor::OnSignedIn() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; main_wnd_->SwitchToPeerList(client_->peers()); } void Conductor::OnDisconnected() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; DeletePeerConnection(); @@ -275,16 +275,16 @@ void Conductor::OnDisconnected() { } void Conductor::OnPeerConnected(int id, const std::string& name) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; // Refresh the list if we're showing it. if (main_wnd_->current_ui() == MainWindow::LIST_PEERS) main_wnd_->SwitchToPeerList(client_->peers()); } void Conductor::OnPeerDisconnected(int id) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (id == peer_id_) { - RTC_LOG(INFO) << "Our peer disconnected"; + RTC_LOG(LS_INFO) << "Our peer disconnected"; main_wnd_->QueueUIThreadCallback(PEER_CONNECTION_CLOSED, NULL); } else { // Refresh the list if we're showing it. 
@@ -308,7 +308,7 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { } } else if (peer_id != peer_id_) { RTC_DCHECK(peer_id_ != -1); - RTC_LOG(WARNING) + RTC_LOG(LS_WARNING) << "Received a message from unknown peer while already in a " "conversation with a different peer."; return; @@ -317,7 +317,7 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { Json::Reader reader; Json::Value jmessage; if (!reader.parse(message, jmessage)) { - RTC_LOG(WARNING) << "Received unknown message. " << message; + RTC_LOG(LS_WARNING) << "Received unknown message. " << message; return; } std::string type_str; @@ -346,19 +346,21 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { std::string sdp; if (!rtc::GetStringFromJsonObject(jmessage, kSessionDescriptionSdpName, &sdp)) { - RTC_LOG(WARNING) << "Can't parse received session description message."; + RTC_LOG(LS_WARNING) + << "Can't parse received session description message."; return; } webrtc::SdpParseError error; std::unique_ptr session_description = webrtc::CreateSessionDescription(type, sdp, &error); if (!session_description) { - RTC_LOG(WARNING) << "Can't parse received session description message. " - "SdpParseError was: " - << error.description; + RTC_LOG(LS_WARNING) + << "Can't parse received session description message. " + "SdpParseError was: " + << error.description; return; } - RTC_LOG(INFO) << " Received session description :" << message; + RTC_LOG(LS_INFO) << " Received session description :" << message; peer_connection_->SetRemoteDescription( DummySetSessionDescriptionObserver::Create(), session_description.release()); @@ -375,23 +377,23 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { !rtc::GetIntFromJsonObject(jmessage, kCandidateSdpMlineIndexName, &sdp_mlineindex) || !rtc::GetStringFromJsonObject(jmessage, kCandidateSdpName, &sdp)) { - RTC_LOG(WARNING) << "Can't parse received message."; + RTC_LOG(LS_WARNING) << "Can't parse received message."; return; } webrtc::SdpParseError error; std::unique_ptr candidate( webrtc::CreateIceCandidate(sdp_mid, sdp_mlineindex, sdp, &error)); if (!candidate.get()) { - RTC_LOG(WARNING) << "Can't parse received candidate message. " - "SdpParseError was: " - << error.description; + RTC_LOG(LS_WARNING) << "Can't parse received candidate message. 
" + "SdpParseError was: " + << error.description; return; } if (!peer_connection_->AddIceCandidate(candidate.get())) { - RTC_LOG(WARNING) << "Failed to apply the received candidate"; + RTC_LOG(LS_WARNING) << "Failed to apply the received candidate"; return; } - RTC_LOG(INFO) << " Received candidate :" << message; + RTC_LOG(LS_INFO) << " Received candidate :" << message; } } @@ -475,7 +477,7 @@ void Conductor::AddTracks() { } void Conductor::DisconnectFromCurrentPeer() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (peer_connection_.get()) { client_->SendHangUp(peer_id_); DeletePeerConnection(); @@ -488,7 +490,7 @@ void Conductor::DisconnectFromCurrentPeer() { void Conductor::UIThreadCallback(int msg_id, void* data) { switch (msg_id) { case PEER_CONNECTION_CLOSED: - RTC_LOG(INFO) << "PEER_CONNECTION_CLOSED"; + RTC_LOG(LS_INFO) << "PEER_CONNECTION_CLOSED"; DeletePeerConnection(); if (main_wnd_->IsWindow()) { @@ -503,7 +505,7 @@ void Conductor::UIThreadCallback(int msg_id, void* data) { break; case SEND_MESSAGE_TO_PEER: { - RTC_LOG(INFO) << "SEND_MESSAGE_TO_PEER"; + RTC_LOG(LS_INFO) << "SEND_MESSAGE_TO_PEER"; std::string* msg = reinterpret_cast(data); if (msg) { // For convenience, we always run the message through the queue. @@ -579,7 +581,7 @@ void Conductor::OnSuccess(webrtc::SessionDescriptionInterface* desc) { } void Conductor::OnFailure(webrtc::RTCError error) { - RTC_LOG(LERROR) << ToString(error.type()) << ": " << error.message(); + RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message(); } void Conductor::SendMessage(const std::string& json_object) { diff --git a/examples/peerconnection/client/linux/main_wnd.cc b/examples/peerconnection/client/linux/main_wnd.cc index 7dcfa89d6a..fe29d6220a 100644 --- a/examples/peerconnection/client/linux/main_wnd.cc +++ b/examples/peerconnection/client/linux/main_wnd.cc @@ -252,7 +252,7 @@ bool GtkMainWnd::Destroy() { } void GtkMainWnd::SwitchToConnectUI() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(IsWindow()); RTC_DCHECK(vbox_ == NULL); @@ -308,7 +308,7 @@ void GtkMainWnd::SwitchToConnectUI() { } void GtkMainWnd::SwitchToPeerList(const Peers& peers) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (!peer_list_) { gtk_container_set_border_width(GTK_CONTAINER(window_), 0); @@ -345,7 +345,7 @@ void GtkMainWnd::SwitchToPeerList(const Peers& peers) { } void GtkMainWnd::SwitchToStreamingUI() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(draw_area_ == NULL); diff --git a/examples/peerconnection/client/peer_connection_client.cc b/examples/peerconnection/client/peer_connection_client.cc index 713259360c..b76b925bda 100644 --- a/examples/peerconnection/client/peer_connection_client.cc +++ b/examples/peerconnection/client/peer_connection_client.cc @@ -297,7 +297,7 @@ bool PeerConnectionClient::ReadIntoBuffer(rtc::Socket* socket, bool ret = false; size_t i = data->find("\r\n\r\n"); if (i != std::string::npos) { - RTC_LOG(INFO) << "Headers received"; + RTC_LOG(LS_INFO) << "Headers received"; if (GetHeaderValue(*data, i, "\r\nContent-Length: ", content_length)) { size_t total_response_size = (i + 4) + *content_length; if (data->length() >= total_response_size) { @@ -374,7 +374,7 @@ void PeerConnectionClient::OnRead(rtc::Socket* socket) { } void PeerConnectionClient::OnHangingGetRead(rtc::Socket* socket) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; size_t content_length = 0; if 
(ReadIntoBuffer(socket, &notification_data_, &content_length)) { size_t peer_id = 0, eoh = 0; @@ -472,7 +472,7 @@ bool PeerConnectionClient::ParseServerResponse(const std::string& response, } void PeerConnectionClient::OnClose(rtc::Socket* socket, int err) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; socket->Close(); diff --git a/examples/unityplugin/simple_peer_connection.cc b/examples/unityplugin/simple_peer_connection.cc index e1c9c406b6..4e16c00bd4 100644 --- a/examples/unityplugin/simple_peer_connection.cc +++ b/examples/unityplugin/simple_peer_connection.cc @@ -101,10 +101,10 @@ class DummySetSessionDescriptionObserver static DummySetSessionDescriptionObserver* Create() { return new rtc::RefCountedObject(); } - virtual void OnSuccess() { RTC_LOG(INFO) << __FUNCTION__; } + virtual void OnSuccess() { RTC_LOG(LS_INFO) << __FUNCTION__; } virtual void OnFailure(webrtc::RTCError error) { - RTC_LOG(INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " - << error.message(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " + << error.message(); } protected: @@ -278,7 +278,7 @@ void SimplePeerConnection::OnFailure(webrtc::RTCError error) { void SimplePeerConnection::OnIceCandidate( const webrtc::IceCandidateInterface* candidate) { - RTC_LOG(INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); std::string sdp; if (!candidate->ToString(&sdp)) { @@ -348,7 +348,7 @@ bool SimplePeerConnection::SetRemoteDescription(const char* type, << error.description; return false; } - RTC_LOG(INFO) << " Received session description :" << remote_desc; + RTC_LOG(LS_INFO) << " Received session description :" << remote_desc; peer_connection_->SetRemoteDescription( DummySetSessionDescriptionObserver::Create(), session_description); @@ -374,7 +374,7 @@ bool SimplePeerConnection::AddIceCandidate(const char* candidate, RTC_LOG(WARNING) << "Failed to apply the received candidate"; return false; } - RTC_LOG(INFO) << " Received candidate :" << candidate; + RTC_LOG(LS_INFO) << " Received candidate :" << candidate; return true; } @@ -409,7 +409,7 @@ void SimplePeerConnection::SetAudioControl() { void SimplePeerConnection::OnAddStream( rtc::scoped_refptr stream) { - RTC_LOG(INFO) << __FUNCTION__ << " " << stream->id(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << stream->id(); remote_stream_ = stream; if (remote_video_observer_ && !remote_stream_->GetVideoTracks().empty()) { remote_stream_->GetVideoTracks()[0]->AddOrUpdateSink( diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc b/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc index 2bd7507853..6422ea07e0 100644 --- a/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc +++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc @@ -674,7 +674,7 @@ std::string RtcEventLogEncoderLegacy::EncodeVideoSendStreamConfig( encoder->set_payload_type(codec.payload_type); if (event.config().codecs.size() > 1) { - RTC_LOG(WARNING) + RTC_LOG(LS_WARNING) << "LogVideoSendStreamConfig currently only supports one " "codec. 
Logging codec :" << codec.payload_name; diff --git a/media/sctp/usrsctp_transport_reliability_unittest.cc b/media/sctp/usrsctp_transport_reliability_unittest.cc index 104e320398..a06d8d84dd 100644 --- a/media/sctp/usrsctp_transport_reliability_unittest.cc +++ b/media/sctp/usrsctp_transport_reliability_unittest.cc @@ -259,8 +259,8 @@ class SctpDataReceiver final : public sigslot::has_slots<> { } if (num_messages_received_ % kLogPerMessagesCount == 0) { - RTC_LOG(INFO) << receiver_id_ << " receiver got " - << num_messages_received_ << " messages"; + RTC_LOG(LS_INFO) << receiver_id_ << " receiver got " + << num_messages_received_ << " messages"; } } diff --git a/modules/audio_device/android/aaudio_player.cc b/modules/audio_device/android/aaudio_player.cc index 7f635126bd..a5a3675cec 100644 --- a/modules/audio_device/android/aaudio_player.cc +++ b/modules/audio_device/android/aaudio_player.cc @@ -27,19 +27,19 @@ enum AudioDeviceMessageType : uint32_t { AAudioPlayer::AAudioPlayer(AudioManager* audio_manager) : main_thread_(rtc::Thread::Current()), aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; thread_checker_aaudio_.Detach(); } AAudioPlayer::~AAudioPlayer() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK_RUN_ON(&main_thread_checker_); Terminate(); - RTC_LOG(INFO) << "#detected underruns: " << underrun_count_; + RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count_; } int AAudioPlayer::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK_RUN_ON(&main_thread_checker_); if (aaudio_.audio_parameters().channels() == 2) { RTC_DLOG(LS_WARNING) << "Stereo mode is enabled"; @@ -48,14 +48,14 @@ int AAudioPlayer::Init() { } int AAudioPlayer::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK_RUN_ON(&main_thread_checker_); StopPlayout(); return 0; } int AAudioPlayer::InitPlayout() { - RTC_LOG(INFO) << "InitPlayout"; + RTC_LOG(LS_INFO) << "InitPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); RTC_DCHECK(!initialized_); RTC_DCHECK(!playing_); @@ -72,7 +72,7 @@ bool AAudioPlayer::PlayoutIsInitialized() const { } int AAudioPlayer::StartPlayout() { - RTC_LOG(INFO) << "StartPlayout"; + RTC_LOG(LS_INFO) << "StartPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); RTC_DCHECK(!playing_); if (!initialized_) { @@ -93,7 +93,7 @@ int AAudioPlayer::StartPlayout() { } int AAudioPlayer::StopPlayout() { - RTC_LOG(INFO) << "StopPlayout"; + RTC_LOG(LS_INFO) << "StopPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); if (!initialized_ || !playing_) { return 0; @@ -114,7 +114,7 @@ bool AAudioPlayer::Playing() const { } void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_DLOG(INFO) << "AttachAudioBuffer"; + RTC_DLOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK_RUN_ON(&main_thread_checker_); audio_device_buffer_ = audioBuffer; const AudioParameters audio_parameters = aaudio_.audio_parameters(); @@ -157,9 +157,9 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data, // Log device id in first data callback to ensure that a valid device is // utilized. 
if (first_data_callback_) { - RTC_LOG(INFO) << "--- First output data callback: " - "device id=" - << aaudio_.device_id(); + RTC_LOG(LS_INFO) << "--- First output data callback: " + "device id=" + << aaudio_.device_id(); first_data_callback_ = false; } @@ -179,8 +179,8 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data, latency_millis_ = aaudio_.EstimateLatencyMillis(); // TODO(henrika): use for development only. if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) { - RTC_DLOG(INFO) << "output latency: " << latency_millis_ - << ", num_frames: " << num_frames; + RTC_DLOG(LS_INFO) << "output latency: " << latency_millis_ + << ", num_frames: " << num_frames; } // Read audio data from the WebRTC source using the FineAudioBuffer object @@ -215,7 +215,7 @@ void AAudioPlayer::OnMessage(rtc::Message* msg) { void AAudioPlayer::HandleStreamDisconnected() { RTC_DCHECK_RUN_ON(&main_thread_checker_); - RTC_DLOG(INFO) << "HandleStreamDisconnected"; + RTC_DLOG(LS_INFO) << "HandleStreamDisconnected"; if (!initialized_ || !playing_) { return; } diff --git a/modules/audio_device/android/aaudio_recorder.cc b/modules/audio_device/android/aaudio_recorder.cc index 68c9cee858..d91fb9e0bf 100644 --- a/modules/audio_device/android/aaudio_recorder.cc +++ b/modules/audio_device/android/aaudio_recorder.cc @@ -28,19 +28,19 @@ enum AudioDeviceMessageType : uint32_t { AAudioRecorder::AAudioRecorder(AudioManager* audio_manager) : main_thread_(rtc::Thread::Current()), aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; thread_checker_aaudio_.Detach(); } AAudioRecorder::~AAudioRecorder() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); - RTC_LOG(INFO) << "detected owerflows: " << overflow_count_; + RTC_LOG(LS_INFO) << "detected owerflows: " << overflow_count_; } int AAudioRecorder::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); if (aaudio_.audio_parameters().channels() == 2) { RTC_DLOG(LS_WARNING) << "Stereo mode is enabled"; @@ -49,14 +49,14 @@ int AAudioRecorder::Init() { } int AAudioRecorder::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopRecording(); return 0; } int AAudioRecorder::InitRecording() { - RTC_LOG(INFO) << "InitRecording"; + RTC_LOG(LS_INFO) << "InitRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK(!recording_); @@ -68,7 +68,7 @@ int AAudioRecorder::InitRecording() { } int AAudioRecorder::StartRecording() { - RTC_LOG(INFO) << "StartRecording"; + RTC_LOG(LS_INFO) << "StartRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(initialized_); RTC_DCHECK(!recording_); @@ -85,7 +85,7 @@ int AAudioRecorder::StartRecording() { } int AAudioRecorder::StopRecording() { - RTC_LOG(INFO) << "StopRecording"; + RTC_LOG(LS_INFO) << "StopRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !recording_) { return 0; @@ -100,7 +100,7 @@ int AAudioRecorder::StopRecording() { } void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const AudioParameters audio_parameters = aaudio_.audio_parameters(); @@ -114,19 +114,19 @@ void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* 
audioBuffer) { } int AAudioRecorder::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAEC: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInAEC: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } int AAudioRecorder::EnableBuiltInAGC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAGC: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInAGC: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } int AAudioRecorder::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInNS: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInNS: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } @@ -154,14 +154,14 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( int32_t num_frames) { // TODO(henrika): figure out why we sometimes hit this one. // RTC_DCHECK(thread_checker_aaudio_.IsCurrent()); - // RTC_LOG(INFO) << "OnDataCallback: " << num_frames; + // RTC_LOG(LS_INFO) << "OnDataCallback: " << num_frames; // Drain the input buffer at first callback to ensure that it does not // contain any old data. Will also ensure that the lowest possible latency // is obtained. if (first_data_callback_) { - RTC_LOG(INFO) << "--- First input data callback: " - "device id=" - << aaudio_.device_id(); + RTC_LOG(LS_INFO) << "--- First input data callback: " + "device id=" + << aaudio_.device_id(); aaudio_.ClearInputStream(audio_data, num_frames); first_data_callback_ = false; } @@ -177,8 +177,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( latency_millis_ = aaudio_.EstimateLatencyMillis(); // TODO(henrika): use for development only. if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) { - RTC_DLOG(INFO) << "input latency: " << latency_millis_ - << ", num_frames: " << num_frames; + RTC_DLOG(LS_INFO) << "input latency: " << latency_millis_ + << ", num_frames: " << num_frames; } // Copy recorded audio in `audio_data` to the WebRTC sink using the // FineAudioBuffer object. @@ -204,7 +204,7 @@ void AAudioRecorder::OnMessage(rtc::Message* msg) { void AAudioRecorder::HandleStreamDisconnected() { RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_LOG(INFO) << "HandleStreamDisconnected"; + RTC_LOG(LS_INFO) << "HandleStreamDisconnected"; if (!initialized_ || !recording_) { return; } diff --git a/modules/audio_device/android/aaudio_wrapper.cc b/modules/audio_device/android/aaudio_wrapper.cc index ab1278436e..82860e3703 100644 --- a/modules/audio_device/android/aaudio_wrapper.cc +++ b/modules/audio_device/android/aaudio_wrapper.cc @@ -134,23 +134,23 @@ AAudioWrapper::AAudioWrapper(AudioManager* audio_manager, aaudio_direction_t direction, AAudioObserverInterface* observer) : direction_(direction), observer_(observer) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(observer_); direction_ == AAUDIO_DIRECTION_OUTPUT ? audio_parameters_ = audio_manager->GetPlayoutAudioParameters() : audio_parameters_ = audio_manager->GetRecordAudioParameters(); aaudio_thread_checker_.Detach(); - RTC_LOG(INFO) << audio_parameters_.ToString(); + RTC_LOG(LS_INFO) << audio_parameters_.ToString(); } AAudioWrapper::~AAudioWrapper() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!stream_); } bool AAudioWrapper::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); // Creates a stream builder which can be used to open an audio stream. 
ScopedStreamBuilder builder; @@ -174,7 +174,7 @@ bool AAudioWrapper::Init() { } bool AAudioWrapper::Start() { - RTC_LOG(INFO) << "Start"; + RTC_LOG(LS_INFO) << "Start"; RTC_DCHECK(thread_checker_.IsCurrent()); // TODO(henrika): this state check might not be needed. aaudio_stream_state_t current_state = AAudioStream_getState(stream_); @@ -190,7 +190,7 @@ bool AAudioWrapper::Start() { } bool AAudioWrapper::Stop() { - RTC_LOG(INFO) << "Stop: " << DirectionToString(direction()); + RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction()); RTC_DCHECK(thread_checker_.IsCurrent()); // Asynchronous request for the stream to stop. RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false); @@ -240,7 +240,7 @@ double AAudioWrapper::EstimateLatencyMillis() const { // Returns new buffer size or a negative error value if buffer size could not // be increased. bool AAudioWrapper::IncreaseOutputBufferSize() { - RTC_LOG(INFO) << "IncreaseBufferSize"; + RTC_LOG(LS_INFO) << "IncreaseBufferSize"; RTC_DCHECK(stream_); RTC_DCHECK(aaudio_thread_checker_.IsCurrent()); RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT); @@ -255,20 +255,20 @@ bool AAudioWrapper::IncreaseOutputBufferSize() { << ") is higher than max: " << max_buffer_size; return false; } - RTC_LOG(INFO) << "Updating buffer size to: " << buffer_size - << " (max=" << max_buffer_size << ")"; + RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size + << " (max=" << max_buffer_size << ")"; buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size); if (buffer_size < 0) { RTC_LOG(LS_ERROR) << "Failed to change buffer size: " << AAudio_convertResultToText(buffer_size); return false; } - RTC_LOG(INFO) << "Buffer size changed to: " << buffer_size; + RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size; return true; } void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) { - RTC_LOG(INFO) << "ClearInputStream"; + RTC_LOG(LS_INFO) << "ClearInputStream"; RTC_DCHECK(stream_); RTC_DCHECK(aaudio_thread_checker_.IsCurrent()); RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT); @@ -357,7 +357,7 @@ int64_t AAudioWrapper::frames_read() const { } void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) { - RTC_LOG(INFO) << "SetStreamConfiguration"; + RTC_LOG(LS_INFO) << "SetStreamConfiguration"; RTC_DCHECK(builder); RTC_DCHECK(thread_checker_.IsCurrent()); // Request usage of default primary output/input device. 
@@ -390,7 +390,7 @@ void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) { } bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) { - RTC_LOG(INFO) << "OpenStream"; + RTC_LOG(LS_INFO) << "OpenStream"; RTC_DCHECK(builder); AAudioStream* stream = nullptr; RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false); @@ -400,7 +400,7 @@ bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) { } void AAudioWrapper::CloseStream() { - RTC_LOG(INFO) << "CloseStream"; + RTC_LOG(LS_INFO) << "CloseStream"; RTC_DCHECK(stream_); LOG_ON_ERROR(AAudioStream_close(stream_)); stream_ = nullptr; @@ -419,16 +419,16 @@ void AAudioWrapper::LogStreamConfiguration() { ss << ", direction=" << DirectionToString(direction()); ss << ", device id=" << AAudioStream_getDeviceId(stream_); ss << ", frames per callback=" << frames_per_callback(); - RTC_LOG(INFO) << ss.str(); + RTC_LOG(LS_INFO) << ss.str(); } void AAudioWrapper::LogStreamState() { - RTC_LOG(INFO) << "AAudio stream state: " - << AAudio_convertStreamStateToText(stream_state()); + RTC_LOG(LS_INFO) << "AAudio stream state: " + << AAudio_convertStreamStateToText(stream_state()); } bool AAudioWrapper::VerifyStreamConfiguration() { - RTC_LOG(INFO) << "VerifyStreamConfiguration"; + RTC_LOG(LS_INFO) << "VerifyStreamConfiguration"; RTC_DCHECK(stream_); // TODO(henrika): should we verify device ID as well? if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) { @@ -466,16 +466,16 @@ bool AAudioWrapper::VerifyStreamConfiguration() { } bool AAudioWrapper::OptimizeBuffers() { - RTC_LOG(INFO) << "OptimizeBuffers"; + RTC_LOG(LS_INFO) << "OptimizeBuffers"; RTC_DCHECK(stream_); // Maximum number of frames that can be filled without blocking. - RTC_LOG(INFO) << "max buffer capacity in frames: " - << buffer_capacity_in_frames(); + RTC_LOG(LS_INFO) << "max buffer capacity in frames: " + << buffer_capacity_in_frames(); // Query the number of frames that the application should read or write at // one time for optimal performance. int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_); - RTC_LOG(INFO) << "frames per burst for optimal performance: " - << frames_per_burst; + RTC_LOG(LS_INFO) << "frames per burst for optimal performance: " + << frames_per_burst; frames_per_burst_ = frames_per_burst; if (direction() == AAUDIO_DIRECTION_INPUT) { // There is no point in calling setBufferSizeInFrames() for input streams @@ -492,7 +492,7 @@ bool AAudioWrapper::OptimizeBuffers() { return false; } // Maximum number of frames that can be filled without blocking. 
- RTC_LOG(INFO) << "buffer burst size in frames: " << buffer_size; + RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size; return true; } diff --git a/modules/audio_device/android/audio_device_template.h b/modules/audio_device/android/audio_device_template.h index 3ea248f79e..a1510d300e 100644 --- a/modules/audio_device/android/audio_device_template.h +++ b/modules/audio_device/android/audio_device_template.h @@ -39,22 +39,22 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { output_(audio_manager_), input_(audio_manager_), initialized_(false) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK(audio_manager); audio_manager_->SetActiveAudioLayer(audio_layer); } - virtual ~AudioDeviceTemplate() { RTC_LOG(INFO) << __FUNCTION__; } + virtual ~AudioDeviceTemplate() { RTC_LOG(LS_INFO) << __FUNCTION__; } int32_t ActiveAudioLayer( AudioDeviceModule::AudioLayer& audioLayer) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; audioLayer = audio_layer_; return 0; } InitStatus Init() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); if (!audio_manager_->Init()) { @@ -74,7 +74,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t Terminate() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); int32_t err = input_.Terminate(); err |= output_.Terminate(); @@ -85,18 +85,18 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } bool Initialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); return initialized_; } int16_t PlayoutDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 1; } int16_t RecordingDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 1; } @@ -115,7 +115,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t SetPlayoutDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is // done using Andoid APIs instead. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } @@ -127,7 +127,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t SetRecordingDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is // done using Andoid APIs instead. 
- RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } @@ -137,39 +137,39 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t PlayoutIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = true; return 0; } int32_t InitPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.InitPlayout(); } bool PlayoutIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.PlayoutIsInitialized(); } int32_t RecordingIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = true; return 0; } int32_t InitRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return input_.InitRecording(); } bool RecordingIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return input_.RecordingIsInitialized(); } int32_t StartPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!audio_manager_->IsCommunicationModeEnabled()) { RTC_LOG(WARNING) << "The application should use MODE_IN_COMMUNICATION audio mode!"; @@ -181,18 +181,18 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // Avoid using audio manger (JNI/Java cost) if playout was inactive. if (!Playing()) return 0; - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; int32_t err = output_.StopPlayout(); return err; } bool Playing() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return output_.Playing(); } int32_t StartRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!audio_manager_->IsCommunicationModeEnabled()) { RTC_LOG(WARNING) << "The application should use MODE_IN_COMMUNICATION audio mode!"; @@ -202,7 +202,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t StopRecording() override { // Avoid using audio manger (JNI/Java cost) if recording was inactive. 
- RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!Recording()) return 0; int32_t err = input_.StopRecording(); @@ -212,47 +212,47 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { bool Recording() const override { return input_.Recording(); } int32_t InitSpeaker() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } bool SpeakerIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return true; } int32_t InitMicrophone() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } bool MicrophoneIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return true; } int32_t SpeakerVolumeIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.SpeakerVolumeIsAvailable(available); } int32_t SetSpeakerVolume(uint32_t volume) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.SetSpeakerVolume(volume); } int32_t SpeakerVolume(uint32_t& volume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.SpeakerVolume(volume); } int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.MaxSpeakerVolume(maxVolume); } int32_t MinSpeakerVolume(uint32_t& minVolume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.MinSpeakerVolume(minVolume); } @@ -299,13 +299,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // Returns true if the audio manager has been configured to support stereo // and false otherwised. Default is mono. int32_t StereoPlayoutIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = audio_manager_->IsStereoPlayoutSupported(); return 0; } int32_t SetStereoPlayout(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; bool available = audio_manager_->IsStereoPlayoutSupported(); // Android does not support changes between mono and stero on the fly. // Instead, the native audio layer is configured via the audio manager @@ -320,13 +320,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t StereoRecordingIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = audio_manager_->IsStereoRecordSupported(); return 0; } int32_t SetStereoRecording(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; bool available = audio_manager_->IsStereoRecordSupported(); // Android does not support changes between mono and stero on the fly. 
// Instead, the native audio layer is configured via the audio manager @@ -336,7 +336,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t StereoRecording(bool& enabled) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; enabled = audio_manager_->IsStereoRecordSupported(); return 0; } @@ -349,7 +349,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; output_.AttachAudioBuffer(audioBuffer); input_.AttachAudioBuffer(audioBuffer); } @@ -367,13 +367,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // a "Not Implemented" log will be filed. This non-perfect state will remain // until I have added full support for audio effects based on OpenSL ES APIs. bool BuiltInAECIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_manager_->IsAcousticEchoCancelerSupported(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. int32_t EnableBuiltInAEC(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available"; return input_.EnableBuiltInAEC(enable); } @@ -383,13 +383,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // TODO(henrika): add implementation for OpenSL ES based audio as well. // In addition, see comments for BuiltInAECIsAvailable(). bool BuiltInAGCIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_manager_->IsAutomaticGainControlSupported(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. int32_t EnableBuiltInAGC(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available"; return input_.EnableBuiltInAGC(enable); } @@ -399,13 +399,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // TODO(henrika): add implementation for OpenSL ES based audio as well. // In addition, see comments for BuiltInAECIsAvailable(). bool BuiltInNSIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_manager_->IsNoiseSuppressorSupported(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. 
int32_t EnableBuiltInNS(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available"; return input_.EnableBuiltInNS(enable); } diff --git a/modules/audio_device/android/audio_manager.cc b/modules/audio_device/android/audio_manager.cc index 7de20656fd..e75bd4d227 100644 --- a/modules/audio_device/android/audio_manager.cc +++ b/modules/audio_device/android/audio_manager.cc @@ -33,11 +33,11 @@ AudioManager::JavaAudioManager::JavaAudioManager( is_device_blacklisted_for_open_sles_usage_( native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage", "()Z")) { - RTC_LOG(INFO) << "JavaAudioManager::ctor"; + RTC_LOG(LS_INFO) << "JavaAudioManager::ctor"; } AudioManager::JavaAudioManager::~JavaAudioManager() { - RTC_LOG(INFO) << "JavaAudioManager::~dtor"; + RTC_LOG(LS_INFO) << "JavaAudioManager::~dtor"; } bool AudioManager::JavaAudioManager::Init() { @@ -68,7 +68,7 @@ AudioManager::AudioManager() low_latency_playout_(false), low_latency_record_(false), delay_estimate_in_milliseconds_(0) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_CHECK(j_environment_); JNINativeMethod native_methods[] = { {"nativeCacheAudioParameters", "(IIIZZZZZZZIIJ)V", @@ -83,14 +83,14 @@ AudioManager::AudioManager() } AudioManager::~AudioManager() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Close(); } void AudioManager::SetActiveAudioLayer( AudioDeviceModule::AudioLayer audio_layer) { - RTC_LOG(INFO) << "SetActiveAudioLayer: " << audio_layer; + RTC_LOG(LS_INFO) << "SetActiveAudioLayer: " << audio_layer; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); // Store the currently utilized audio layer. @@ -103,18 +103,18 @@ void AudioManager::SetActiveAudioLayer( (audio_layer == AudioDeviceModule::kAndroidJavaAudio) ? kHighLatencyModeDelayEstimateInMilliseconds : kLowLatencyModeDelayEstimateInMilliseconds; - RTC_LOG(INFO) << "delay_estimate_in_milliseconds: " - << delay_estimate_in_milliseconds_; + RTC_LOG(LS_INFO) << "delay_estimate_in_milliseconds: " + << delay_estimate_in_milliseconds_; } SLObjectItf AudioManager::GetOpenSLEngine() { - RTC_LOG(INFO) << "GetOpenSLEngine"; + RTC_LOG(LS_INFO) << "GetOpenSLEngine"; RTC_DCHECK(thread_checker_.IsCurrent()); // Only allow usage of OpenSL ES if such an audio layer has been specified. 
if (audio_layer_ != AudioDeviceModule::kAndroidOpenSLESAudio && audio_layer_ != AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) { - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "Unable to create OpenSL engine for the current audio layer: " << audio_layer_; return nullptr; @@ -149,7 +149,7 @@ SLObjectItf AudioManager::GetOpenSLEngine() { } bool AudioManager::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio); @@ -162,7 +162,7 @@ bool AudioManager::Init() { } bool AudioManager::Close() { - RTC_LOG(INFO) << "Close"; + RTC_LOG(LS_INFO) << "Close"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_) return true; @@ -273,7 +273,7 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env, jboolean a_audio, jint output_buffer_size, jint input_buffer_size) { - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "OnCacheAudioParameters: " "hardware_aec: " << static_cast(hardware_aec) diff --git a/modules/audio_device/android/audio_record_jni.cc b/modules/audio_device/android/audio_record_jni.cc index 2c28ab242d..9d7bf73097 100644 --- a/modules/audio_device/android/audio_record_jni.cc +++ b/modules/audio_device/android/audio_record_jni.cc @@ -34,7 +34,7 @@ class ScopedHistogramTimer { ~ScopedHistogramTimer() { const int64_t life_time_ms = rtc::TimeSince(start_time_ms_); RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms); - RTC_LOG(INFO) << histogram_name_ << ": " << life_time_ms; + RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms; } private: @@ -93,7 +93,7 @@ AudioRecordJni::AudioRecordJni(AudioManager* audio_manager) initialized_(false), recording_(false), audio_device_buffer_(nullptr) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(audio_parameters_.is_valid()); RTC_CHECK(j_environment_); JNINativeMethod native_methods[] = { @@ -115,26 +115,26 @@ AudioRecordJni::AudioRecordJni(AudioManager* audio_manager) } AudioRecordJni::~AudioRecordJni() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); } int32_t AudioRecordJni::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); return 0; } int32_t AudioRecordJni::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopRecording(); return 0; } int32_t AudioRecordJni::InitRecording() { - RTC_LOG(INFO) << "InitRecording"; + RTC_LOG(LS_INFO) << "InitRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK(!recording_); @@ -147,7 +147,7 @@ int32_t AudioRecordJni::InitRecording() { return -1; } frames_per_buffer_ = static_cast(frames_per_buffer); - RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_; + RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_; const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t); RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_, frames_per_buffer_ * bytes_per_frame); @@ -157,7 +157,7 @@ int32_t AudioRecordJni::InitRecording() { } int32_t AudioRecordJni::StartRecording() { - RTC_LOG(INFO) << "StartRecording"; + RTC_LOG(LS_INFO) << "StartRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!recording_); if (!initialized_) { @@ -175,7 +175,7 @@ int32_t AudioRecordJni::StartRecording() { } int32_t AudioRecordJni::StopRecording() { - RTC_LOG(INFO) << "StopRecording"; + RTC_LOG(LS_INFO) << 
"StopRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !recording_) { return 0; @@ -195,24 +195,24 @@ int32_t AudioRecordJni::StopRecording() { } void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const int sample_rate_hz = audio_parameters_.sample_rate(); - RTC_LOG(INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")"; + RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")"; audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz); const size_t channels = audio_parameters_.channels(); - RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")"; audio_device_buffer_->SetRecordingChannels(channels); total_delay_in_milliseconds_ = audio_manager_->GetDelayEstimateInMilliseconds(); RTC_DCHECK_GT(total_delay_in_milliseconds_, 0); - RTC_LOG(INFO) << "total_delay_in_milliseconds: " - << total_delay_in_milliseconds_; + RTC_LOG(LS_INFO) << "total_delay_in_milliseconds: " + << total_delay_in_milliseconds_; } int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAEC(" << enable << ")"; + RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1; } @@ -223,7 +223,7 @@ int32_t AudioRecordJni::EnableBuiltInAGC(bool enable) { } int32_t AudioRecordJni::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInNS(" << enable << ")"; + RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1; } @@ -240,12 +240,12 @@ void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env, void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer) { - RTC_LOG(INFO) << "OnCacheDirectBufferAddress"; + RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!direct_buffer_address_); direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer); jlong capacity = env->GetDirectBufferCapacity(byte_buffer); - RTC_LOG(INFO) << "direct buffer capacity: " << capacity; + RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity; direct_buffer_capacity_in_bytes_ = static_cast(capacity); } @@ -274,7 +274,7 @@ void AudioRecordJni::OnDataIsRecorded(int length) { // of `playDelayMs` and `recDelayMs`, hence the distributions does not matter. 
audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0); if (audio_device_buffer_->DeliverRecordedData() == -1) { - RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed"; + RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed"; } } diff --git a/modules/audio_device/android/audio_track_jni.cc b/modules/audio_device/android/audio_track_jni.cc index daaeeca1ea..178ccadfdb 100644 --- a/modules/audio_device/android/audio_track_jni.cc +++ b/modules/audio_device/android/audio_track_jni.cc @@ -103,7 +103,7 @@ AudioTrackJni::AudioTrackJni(AudioManager* audio_manager) initialized_(false), playing_(false), audio_device_buffer_(nullptr) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(audio_parameters_.is_valid()); RTC_CHECK(j_environment_); JNINativeMethod native_methods[] = { @@ -125,26 +125,26 @@ AudioTrackJni::AudioTrackJni(AudioManager* audio_manager) } AudioTrackJni::~AudioTrackJni() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); } int32_t AudioTrackJni::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); return 0; } int32_t AudioTrackJni::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopPlayout(); return 0; } int32_t AudioTrackJni::InitPlayout() { - RTC_LOG(INFO) << "InitPlayout"; + RTC_LOG(LS_INFO) << "InitPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK(!playing_); @@ -158,7 +158,7 @@ int32_t AudioTrackJni::InitPlayout() { } int32_t AudioTrackJni::StartPlayout() { - RTC_LOG(INFO) << "StartPlayout"; + RTC_LOG(LS_INFO) << "StartPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!playing_); if (!initialized_) { @@ -175,7 +175,7 @@ int32_t AudioTrackJni::StartPlayout() { } int32_t AudioTrackJni::StopPlayout() { - RTC_LOG(INFO) << "StopPlayout"; + RTC_LOG(LS_INFO) << "StopPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !playing_) { return 0; @@ -200,7 +200,7 @@ int AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) { } int AudioTrackJni::SetSpeakerVolume(uint32_t volume) { - RTC_LOG(INFO) << "SetSpeakerVolume(" << volume << ")"; + RTC_LOG(LS_INFO) << "SetSpeakerVolume(" << volume << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return j_audio_track_->SetStreamVolume(volume) ? 0 : -1; } @@ -220,20 +220,20 @@ int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const { int AudioTrackJni::SpeakerVolume(uint32_t& volume) const { RTC_DCHECK(thread_checker_.IsCurrent()); volume = j_audio_track_->GetStreamVolume(); - RTC_LOG(INFO) << "SpeakerVolume: " << volume; + RTC_LOG(LS_INFO) << "SpeakerVolume: " << volume; return 0; } // TODO(henrika): possibly add stereo support. 
void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const int sample_rate_hz = audio_parameters_.sample_rate(); - RTC_LOG(INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")"; audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz); const size_t channels = audio_parameters_.channels(); - RTC_LOG(INFO) << "SetPlayoutChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")"; audio_device_buffer_->SetPlayoutChannels(channels); } @@ -249,16 +249,16 @@ void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env, void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer) { - RTC_LOG(INFO) << "OnCacheDirectBufferAddress"; + RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!direct_buffer_address_); direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer); jlong capacity = env->GetDirectBufferCapacity(byte_buffer); - RTC_LOG(INFO) << "direct buffer capacity: " << capacity; + RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity; direct_buffer_capacity_in_bytes_ = static_cast(capacity); const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t); frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame; - RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_; + RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_; } JNI_FUNCTION_ALIGN diff --git a/modules/audio_device/audio_device_buffer.cc b/modules/audio_device/audio_device_buffer.cc index 572982ef4c..73a8210978 100644 --- a/modules/audio_device/audio_device_buffer.cc +++ b/modules/audio_device/audio_device_buffer.cc @@ -61,7 +61,7 @@ AudioDeviceBuffer::AudioDeviceBuffer(TaskQueueFactory* task_queue_factory) play_start_time_(0), only_silence_recorded_(true), log_stats_(false) { - RTC_LOG(INFO) << "AudioDeviceBuffer::ctor"; + RTC_LOG(LS_INFO) << "AudioDeviceBuffer::ctor"; #ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE phase_ = 0.0; RTC_LOG(WARNING) << "AUDIO_DEVICE_PLAYS_SINUS_TONE is defined!"; @@ -72,13 +72,13 @@ AudioDeviceBuffer::~AudioDeviceBuffer() { RTC_DCHECK_RUN_ON(&main_thread_checker_); RTC_DCHECK(!playing_); RTC_DCHECK(!recording_); - RTC_LOG(INFO) << "AudioDeviceBuffer::~dtor"; + RTC_LOG(LS_INFO) << "AudioDeviceBuffer::~dtor"; } int32_t AudioDeviceBuffer::RegisterAudioCallback( AudioTransport* audio_callback) { RTC_DCHECK_RUN_ON(&main_thread_checker_); - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (playing_ || recording_) { RTC_LOG(LS_ERROR) << "Failed to set audio transport since media was active"; return -1; @@ -95,7 +95,7 @@ void AudioDeviceBuffer::StartPlayout() { if (playing_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Clear members tracking playout stats and do it on the task queue. task_queue_.PostTask([this] { ResetPlayStats(); }); // Start a periodic timer based on task queue if not already done by the @@ -114,7 +114,7 @@ void AudioDeviceBuffer::StartRecording() { if (recording_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Clear members tracking recording stats and do it on the task queue. 
task_queue_.PostTask([this] { ResetRecStats(); }); // Start a periodic timer based on task queue if not already done by the @@ -136,13 +136,14 @@ void AudioDeviceBuffer::StopPlayout() { if (!playing_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; playing_ = false; // Stop periodic logging if no more media is active. if (!recording_) { StopPeriodicLogging(); } - RTC_LOG(INFO) << "total playout time: " << rtc::TimeSince(play_start_time_); + RTC_LOG(LS_INFO) << "total playout time: " + << rtc::TimeSince(play_start_time_); } void AudioDeviceBuffer::StopRecording() { @@ -150,7 +151,7 @@ void AudioDeviceBuffer::StopRecording() { if (!recording_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; recording_ = false; // Stop periodic logging if no more media is active. if (!playing_) { @@ -170,20 +171,20 @@ void AudioDeviceBuffer::StopRecording() { if (time_since_start > kMinValidCallTimeTimeInMilliseconds) { const int only_zeros = static_cast(only_silence_recorded_); RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.RecordedOnlyZeros", only_zeros); - RTC_LOG(INFO) << "HISTOGRAM(WebRTC.Audio.RecordedOnlyZeros): " - << only_zeros; + RTC_LOG(LS_INFO) << "HISTOGRAM(WebRTC.Audio.RecordedOnlyZeros): " + << only_zeros; } - RTC_LOG(INFO) << "total recording time: " << time_since_start; + RTC_LOG(LS_INFO) << "total recording time: " << time_since_start; } int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) { - RTC_LOG(INFO) << "SetRecordingSampleRate(" << fsHz << ")"; + RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << fsHz << ")"; rec_sample_rate_ = fsHz; return 0; } int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) { - RTC_LOG(INFO) << "SetPlayoutSampleRate(" << fsHz << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << fsHz << ")"; play_sample_rate_ = fsHz; return 0; } @@ -197,13 +198,13 @@ uint32_t AudioDeviceBuffer::PlayoutSampleRate() const { } int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) { - RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")"; rec_channels_ = channels; return 0; } int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) { - RTC_LOG(INFO) << "SetPlayoutChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")"; play_channels_ = channels; return 0; } @@ -408,21 +409,21 @@ void AudioDeviceBuffer::LogStats(LogState state) { ((100.0f * std::abs(rate - rec_sample_rate)) / rec_sample_rate)); RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.RecordSampleRateOffsetInPercent", abs_diff_rate_in_percent); - RTC_LOG(INFO) << "[REC : " << time_since_last << "msec, " - << rec_sample_rate / 1000 << "kHz] callbacks: " - << stats.rec_callbacks - last_stats_.rec_callbacks - << ", " - "samples: " - << diff_samples - << ", " - "rate: " - << static_cast(rate + 0.5) - << ", " - "rate diff: " - << abs_diff_rate_in_percent - << "%, " - "level: " - << stats.max_rec_level; + RTC_LOG(LS_INFO) << "[REC : " << time_since_last << "msec, " + << rec_sample_rate / 1000 << "kHz] callbacks: " + << stats.rec_callbacks - last_stats_.rec_callbacks + << ", " + "samples: " + << diff_samples + << ", " + "rate: " + << static_cast(rate + 0.5) + << ", " + "rate diff: " + << abs_diff_rate_in_percent + << "%, " + "level: " + << stats.max_rec_level; } diff_samples = stats.play_samples - last_stats_.play_samples; @@ -434,21 +435,21 @@ void AudioDeviceBuffer::LogStats(LogState state) { ((100.0f * std::abs(rate - 
play_sample_rate)) / play_sample_rate)); RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.PlayoutSampleRateOffsetInPercent", abs_diff_rate_in_percent); - RTC_LOG(INFO) << "[PLAY: " << time_since_last << "msec, " - << play_sample_rate / 1000 << "kHz] callbacks: " - << stats.play_callbacks - last_stats_.play_callbacks - << ", " - "samples: " - << diff_samples - << ", " - "rate: " - << static_cast(rate + 0.5) - << ", " - "rate diff: " - << abs_diff_rate_in_percent - << "%, " - "level: " - << stats.max_play_level; + RTC_LOG(LS_INFO) << "[PLAY: " << time_since_last << "msec, " + << play_sample_rate / 1000 << "kHz] callbacks: " + << stats.play_callbacks - last_stats_.play_callbacks + << ", " + "samples: " + << diff_samples + << ", " + "rate: " + << static_cast(rate + 0.5) + << ", " + "rate diff: " + << abs_diff_rate_in_percent + << "%, " + "level: " + << stats.max_play_level; } } last_stats_ = stats; diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc index 84460ff83f..9d540f911c 100644 --- a/modules/audio_device/audio_device_impl.cc +++ b/modules/audio_device/audio_device_impl.cc @@ -73,7 +73,7 @@ namespace webrtc { rtc::scoped_refptr AudioDeviceModule::Create( AudioLayer audio_layer, TaskQueueFactory* task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory); } @@ -81,7 +81,7 @@ rtc::scoped_refptr AudioDeviceModule::Create( rtc::scoped_refptr AudioDeviceModule::CreateForTest( AudioLayer audio_layer, TaskQueueFactory* task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own // dedicated factory method which should be used instead. @@ -118,28 +118,28 @@ AudioDeviceModuleImpl::AudioDeviceModuleImpl( AudioLayer audio_layer, TaskQueueFactory* task_queue_factory) : audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; } int32_t AudioDeviceModuleImpl::CheckPlatform() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Ensure that the current platform is supported PlatformType platform(kPlatformNotSupported); #if defined(_WIN32) platform = kPlatformWin32; - RTC_LOG(INFO) << "current platform is Win32"; + RTC_LOG(LS_INFO) << "current platform is Win32"; #elif defined(WEBRTC_ANDROID) platform = kPlatformAndroid; - RTC_LOG(INFO) << "current platform is Android"; + RTC_LOG(LS_INFO) << "current platform is Android"; #elif defined(WEBRTC_LINUX) platform = kPlatformLinux; - RTC_LOG(INFO) << "current platform is Linux"; + RTC_LOG(LS_INFO) << "current platform is Linux"; #elif defined(WEBRTC_IOS) platform = kPlatformIOS; - RTC_LOG(INFO) << "current platform is IOS"; + RTC_LOG(LS_INFO) << "current platform is IOS"; #elif defined(WEBRTC_MAC) platform = kPlatformMac; - RTC_LOG(INFO) << "current platform is Mac"; + RTC_LOG(LS_INFO) << "current platform is Mac"; #endif if (platform == kPlatformNotSupported) { RTC_LOG(LERROR) @@ -152,19 +152,19 @@ int32_t AudioDeviceModuleImpl::CheckPlatform() { } int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; // Dummy ADM implementations if build flags are set. 
#if defined(WEBRTC_DUMMY_AUDIO_BUILD) audio_device_.reset(new AudioDeviceDummy()); - RTC_LOG(INFO) << "Dummy Audio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized"; #elif defined(WEBRTC_DUMMY_FILE_DEVICES) audio_device_.reset(FileAudioDeviceFactory::CreateFileAudioDevice()); if (audio_device_) { - RTC_LOG(INFO) << "Will use file-playing dummy device."; + RTC_LOG(LS_INFO) << "Will use file-playing dummy device."; } else { // Create a dummy device instead. audio_device_.reset(new AudioDeviceDummy()); - RTC_LOG(INFO) << "Dummy Audio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized"; } // Real (non-dummy) ADM implementations. @@ -174,10 +174,10 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) if ((audio_layer == kWindowsCoreAudio) || (audio_layer == kPlatformDefaultAudio)) { - RTC_LOG(INFO) << "Attempting to use the Windows Core Audio APIs..."; + RTC_LOG(LS_INFO) << "Attempting to use the Windows Core Audio APIs..."; if (AudioDeviceWindowsCore::CoreAudioIsSupported()) { audio_device_.reset(new AudioDeviceWindowsCore()); - RTC_LOG(INFO) << "Windows Core Audio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Windows Core Audio APIs will be utilized"; } } #endif // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) @@ -255,7 +255,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { if ((audio_layer == kLinuxAlsaAudio) || (audio_layer == kPlatformDefaultAudio)) { audio_device_.reset(new AudioDeviceLinuxALSA()); - RTC_LOG(INFO) << "Linux ALSA APIs will be utilized."; + RTC_LOG(LS_INFO) << "Linux ALSA APIs will be utilized."; } #else // Build flag 'rtc_include_pulse_audio' is set to true (default). In this @@ -263,12 +263,12 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { // - kPlatformDefaultAudio => PulseAudio, and // - kLinuxPulseAudio => PulseAudio, and // - kLinuxAlsaAudio => ALSA (supported but not default). - RTC_LOG(INFO) << "PulseAudio support is enabled."; + RTC_LOG(LS_INFO) << "PulseAudio support is enabled."; if ((audio_layer == kLinuxPulseAudio) || (audio_layer == kPlatformDefaultAudio)) { // Linux PulseAudio implementation is default. audio_device_.reset(new AudioDeviceLinuxPulse()); - RTC_LOG(INFO) << "Linux PulseAudio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Linux PulseAudio APIs will be utilized"; } else if (audio_layer == kLinuxAlsaAudio) { audio_device_.reset(new AudioDeviceLinuxALSA()); RTC_LOG(WARNING) << "Linux ALSA APIs will be utilized."; @@ -281,7 +281,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { if (audio_layer == kPlatformDefaultAudio) { audio_device_.reset( new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false)); - RTC_LOG(INFO) << "iPhone Audio APIs will be utilized."; + RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized."; } // END #if defined(WEBRTC_IOS) @@ -289,14 +289,14 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { #elif defined(WEBRTC_MAC) if (audio_layer == kPlatformDefaultAudio) { audio_device_.reset(new AudioDeviceMac()); - RTC_LOG(INFO) << "Mac OS X Audio APIs will be utilized."; + RTC_LOG(LS_INFO) << "Mac OS X Audio APIs will be utilized."; } #endif // WEBRTC_MAC // Dummy ADM implementation. 
if (audio_layer == kDummyAudio) { audio_device_.reset(new AudioDeviceDummy()); - RTC_LOG(INFO) << "Dummy Audio APIs will be utilized."; + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized."; } #endif // if defined(WEBRTC_DUMMY_AUDIO_BUILD) @@ -309,17 +309,17 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { } int32_t AudioDeviceModuleImpl::AttachAudioBuffer() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; audio_device_->AttachAudioBuffer(&audio_device_buffer_); return 0; } AudioDeviceModuleImpl::~AudioDeviceModuleImpl() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; } int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; AudioLayer activeAudio; if (audio_device_->ActiveAudioLayer(activeAudio) == -1) { return -1; @@ -329,7 +329,7 @@ int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const { } int32_t AudioDeviceModuleImpl::Init() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (initialized_) return 0; RTC_CHECK(audio_device_); @@ -346,7 +346,7 @@ int32_t AudioDeviceModuleImpl::Init() { } int32_t AudioDeviceModuleImpl::Terminate() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (!initialized_) return 0; if (audio_device_->Terminate() == -1) { @@ -357,65 +357,65 @@ int32_t AudioDeviceModuleImpl::Terminate() { } bool AudioDeviceModuleImpl::Initialized() const { - RTC_LOG(INFO) << __FUNCTION__ << ": " << initialized_; + RTC_LOG(LS_INFO) << __FUNCTION__ << ": " << initialized_; return initialized_; } int32_t AudioDeviceModuleImpl::InitSpeaker() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->InitSpeaker(); } int32_t AudioDeviceModuleImpl::InitMicrophone() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->InitMicrophone(); } int32_t AudioDeviceModuleImpl::SpeakerVolumeIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetSpeakerVolume(uint32_t volume) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerVolume(volume); } int32_t AudioDeviceModuleImpl::SpeakerVolume(uint32_t* volume) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->SpeakerVolume(level) == -1) { return -1; } *volume = level; - RTC_LOG(INFO) << "output: " << *volume; + RTC_LOG(LS_INFO) << "output: " << *volume; return 0; } bool AudioDeviceModuleImpl::SpeakerIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = audio_device_->SpeakerIsInitialized(); - RTC_LOG(INFO) << "output: " << isInitialized; + RTC_LOG(LS_INFO) << "output: " << isInitialized; return isInitialized; } bool AudioDeviceModuleImpl::MicrophoneIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = 
audio_device_->MicrophoneIsInitialized(); - RTC_LOG(INFO) << "output: " << isInitialized; + RTC_LOG(LS_INFO) << "output: " << isInitialized; return isInitialized; } @@ -440,110 +440,110 @@ int32_t AudioDeviceModuleImpl::MinSpeakerVolume(uint32_t* minVolume) const { } int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetSpeakerMute(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerMute(enable); } int32_t AudioDeviceModuleImpl::SpeakerMute(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->SpeakerMute(muted) == -1) { return -1; } *enabled = muted; - RTC_LOG(INFO) << "output: " << muted; + RTC_LOG(LS_INFO) << "output: " << muted; return 0; } int32_t AudioDeviceModuleImpl::MicrophoneMuteIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetMicrophoneMute(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneMute(enable)); } int32_t AudioDeviceModuleImpl::MicrophoneMute(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->MicrophoneMute(muted) == -1) { return -1; } *enabled = muted; - RTC_LOG(INFO) << "output: " << muted; + RTC_LOG(LS_INFO) << "output: " << muted; return 0; } int32_t AudioDeviceModuleImpl::MicrophoneVolumeIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetMicrophoneVolume(uint32_t volume) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneVolume(volume)); } int32_t AudioDeviceModuleImpl::MicrophoneVolume(uint32_t* volume) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->MicrophoneVolume(level) == -1) { return -1; } *volume = level; - RTC_LOG(INFO) << "output: " << *volume; + RTC_LOG(LS_INFO) << "output: " << *volume; return 0; } int32_t AudioDeviceModuleImpl::StereoRecordingIsAvailable( bool* available) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if 
(audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetStereoRecording(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (audio_device_->RecordingIsInitialized()) { RTC_LOG(LERROR) @@ -565,31 +565,31 @@ int32_t AudioDeviceModuleImpl::SetStereoRecording(bool enable) { } int32_t AudioDeviceModuleImpl::StereoRecording(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoRecording(stereo) == -1) { return -1; } *enabled = stereo; - RTC_LOG(INFO) << "output: " << stereo; + RTC_LOG(LS_INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleImpl::StereoPlayoutIsAvailable(bool* available) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetStereoPlayout(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (audio_device_->PlayoutIsInitialized()) { RTC_LOG(LERROR) @@ -609,38 +609,38 @@ int32_t AudioDeviceModuleImpl::SetStereoPlayout(bool enable) { } int32_t AudioDeviceModuleImpl::StereoPlayout(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoPlayout(stereo) == -1) { return -1; } *enabled = stereo; - RTC_LOG(INFO) << "output: " << stereo; + RTC_LOG(LS_INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleImpl::PlayoutIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::RecordingIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->RecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } @@ -665,21 +665,21 @@ int32_t AudioDeviceModuleImpl::MinMicrophoneVolume(uint32_t* minVolume) const { } int16_t AudioDeviceModuleImpl::PlayoutDevices() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nPlayoutDevices = audio_device_->PlayoutDevices(); - RTC_LOG(INFO) << "output: " << nPlayoutDevices; + RTC_LOG(LS_INFO) << "output: " << nPlayoutDevices; return (int16_t)(nPlayoutDevices); } int32_t AudioDeviceModuleImpl::SetPlayoutDevice(uint16_t index) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(index); } int32_t 
AudioDeviceModuleImpl::SetPlayoutDevice(WindowsDeviceType device) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(device); } @@ -688,7 +688,7 @@ int32_t AudioDeviceModuleImpl::PlayoutDeviceName( uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -697,10 +697,10 @@ int32_t AudioDeviceModuleImpl::PlayoutDeviceName( return -1; } if (name != NULL) { - RTC_LOG(INFO) << "output: name = " << name; + RTC_LOG(LS_INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_LOG(INFO) << "output: guid = " << guid; + RTC_LOG(LS_INFO) << "output: guid = " << guid; } return 0; } @@ -709,7 +709,7 @@ int32_t AudioDeviceModuleImpl::RecordingDeviceName( uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -718,137 +718,137 @@ int32_t AudioDeviceModuleImpl::RecordingDeviceName( return -1; } if (name != NULL) { - RTC_LOG(INFO) << "output: name = " << name; + RTC_LOG(LS_INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_LOG(INFO) << "output: guid = " << guid; + RTC_LOG(LS_INFO) << "output: guid = " << guid; } return 0; } int16_t AudioDeviceModuleImpl::RecordingDevices() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nRecordingDevices = audio_device_->RecordingDevices(); - RTC_LOG(INFO) << "output: " << nRecordingDevices; + RTC_LOG(LS_INFO) << "output: " << nRecordingDevices; return (int16_t)nRecordingDevices; } int32_t AudioDeviceModuleImpl::SetRecordingDevice(uint16_t index) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetRecordingDevice(index); } int32_t AudioDeviceModuleImpl::SetRecordingDevice(WindowsDeviceType device) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetRecordingDevice(device); } int32_t AudioDeviceModuleImpl::InitPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (PlayoutIsInitialized()) { return 0; } int32_t result = audio_device_->InitPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleImpl::InitRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (RecordingIsInitialized()) { return 0; } int32_t result = audio_device_->InitRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleImpl::PlayoutIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->PlayoutIsInitialized(); } bool AudioDeviceModuleImpl::RecordingIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return 
audio_device_->RecordingIsInitialized(); } int32_t AudioDeviceModuleImpl::StartPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (Playing()) { return 0; } audio_device_buffer_.StartPlayout(); int32_t result = audio_device_->StartPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleImpl::StopPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopPlayout(); audio_device_buffer_.StopPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleImpl::Playing() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->Playing(); } int32_t AudioDeviceModuleImpl::StartRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (Recording()) { return 0; } audio_device_buffer_.StartRecording(); int32_t result = audio_device_->StartRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleImpl::StopRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopRecording(); audio_device_buffer_.StopRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleImpl::Recording() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->Recording(); } int32_t AudioDeviceModuleImpl::RegisterAudioCallback( AudioTransport* audioCallback) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return audio_device_buffer_.RegisterAudioCallback(audioCallback); } @@ -864,87 +864,87 @@ int32_t AudioDeviceModuleImpl::PlayoutDelay(uint16_t* delayMS) const { } bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAECIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAEC(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_LOG(LS_INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleImpl::BuiltInAGCIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAGCIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleImpl::EnableBuiltInAGC(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + 
RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAGC(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_LOG(LS_INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleImpl::BuiltInNSIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInNSIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleImpl::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInNS(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_LOG(LS_INFO) << "output: " << ok; return ok; } int32_t AudioDeviceModuleImpl::GetPlayoutUnderrunCount() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t underrunCount = audio_device_->GetPlayoutUnderrunCount(); - RTC_LOG(INFO) << "output: " << underrunCount; + RTC_LOG(LS_INFO) << "output: " << underrunCount; return underrunCount; } #if defined(WEBRTC_IOS) int AudioDeviceModuleImpl::GetPlayoutAudioParameters( AudioParameters* params) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; int r = audio_device_->GetPlayoutAudioParameters(params); - RTC_LOG(INFO) << "output: " << r; + RTC_LOG(LS_INFO) << "output: " << r; return r; } int AudioDeviceModuleImpl::GetRecordAudioParameters( AudioParameters* params) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; int r = audio_device_->GetRecordAudioParameters(params); - RTC_LOG(INFO) << "output: " << r; + RTC_LOG(LS_INFO) << "output: " << r; return r; } #endif // WEBRTC_IOS AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return platform_type_; } AudioDeviceModule::AudioLayer AudioDeviceModuleImpl::PlatformAudioLayer() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return audio_layer_; } diff --git a/modules/audio_device/fine_audio_buffer.cc b/modules/audio_device/fine_audio_buffer.cc index 4f3f48c677..86240da196 100644 --- a/modules/audio_device/fine_audio_buffer.cc +++ b/modules/audio_device/fine_audio_buffer.cc @@ -29,21 +29,21 @@ FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer) playout_channels_(audio_device_buffer->PlayoutChannels()), record_channels_(audio_device_buffer->RecordingChannels()) { RTC_DCHECK(audio_device_buffer_); - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (IsReadyForPlayout()) { - RTC_DLOG(INFO) << "playout_samples_per_channel_10ms: " - << playout_samples_per_channel_10ms_; - RTC_DLOG(INFO) << "playout_channels: " << playout_channels_; + RTC_DLOG(LS_INFO) << "playout_samples_per_channel_10ms: " + << playout_samples_per_channel_10ms_; + RTC_DLOG(LS_INFO) << "playout_channels: " << playout_channels_; } if (IsReadyForRecord()) { - RTC_DLOG(INFO) << "record_samples_per_channel_10ms: " - << record_samples_per_channel_10ms_; - RTC_DLOG(INFO) << "record_channels: " << record_channels_; + RTC_DLOG(LS_INFO) << "record_samples_per_channel_10ms: " + << record_samples_per_channel_10ms_; + RTC_DLOG(LS_INFO) << "record_channels: " << record_channels_; } } FineAudioBuffer::~FineAudioBuffer() { - RTC_DLOG(INFO) << __FUNCTION__; 
+ RTC_DLOG(LS_INFO) << __FUNCTION__; } void FineAudioBuffer::ResetPlayout() { diff --git a/modules/audio_device/include/audio_device_factory.cc b/modules/audio_device/include/audio_device_factory.cc index d5b381029e..130e096e6d 100644 --- a/modules/audio_device/include/audio_device_factory.cc +++ b/modules/audio_device/include/audio_device_factory.cc @@ -27,7 +27,7 @@ namespace webrtc { rtc::scoped_refptr CreateWindowsCoreAudioAudioDeviceModule( TaskQueueFactory* task_queue_factory, bool automatic_restart) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return CreateWindowsCoreAudioAudioDeviceModuleForTest(task_queue_factory, automatic_restart); } @@ -36,7 +36,7 @@ rtc::scoped_refptr CreateWindowsCoreAudioAudioDeviceModuleForTest( TaskQueueFactory* task_queue_factory, bool automatic_restart) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Returns NULL if Core Audio is not supported or if COM has not been // initialized correctly using ScopedCOMInitializer. if (!webrtc_win::core_audio_utility::IsSupported()) { diff --git a/modules/audio_device/win/audio_device_core_win.cc b/modules/audio_device/win/audio_device_core_win.cc index 6c08fcc4af..f1b6b9329c 100644 --- a/modules/audio_device/win/audio_device_core_win.cc +++ b/modules/audio_device/win/audio_device_core_win.cc @@ -1887,18 +1887,18 @@ int32_t AudioDeviceWindowsCore::InitPlayout() { break; } else { if (pWfxClosestMatch) { - RTC_LOG(INFO) << "nChannels=" << Wfx.nChannels - << ", nSamplesPerSec=" << Wfx.nSamplesPerSec - << " is not supported. Closest match: " - "nChannels=" - << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" - << pWfxClosestMatch->nSamplesPerSec; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels + << ", nSamplesPerSec=" << Wfx.nSamplesPerSec + << " is not supported. Closest match: " + "nChannels=" + << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" + << pWfxClosestMatch->nSamplesPerSec; CoTaskMemFree(pWfxClosestMatch); pWfxClosestMatch = NULL; } else { - RTC_LOG(INFO) << "nChannels=" << Wfx.nChannels - << ", nSamplesPerSec=" << Wfx.nSamplesPerSec - << " is not supported. No closest match."; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels + << ", nSamplesPerSec=" << Wfx.nSamplesPerSec + << " is not supported. No closest match."; } } } @@ -2208,18 +2208,18 @@ int32_t AudioDeviceWindowsCore::InitRecording() { break; } else { if (pWfxClosestMatch) { - RTC_LOG(INFO) << "nChannels=" << Wfx.Format.nChannels - << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec - << " is not supported. Closest match: " - "nChannels=" - << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" - << pWfxClosestMatch->nSamplesPerSec; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels + << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec + << " is not supported. Closest match: " + "nChannels=" + << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" + << pWfxClosestMatch->nSamplesPerSec; CoTaskMemFree(pWfxClosestMatch); pWfxClosestMatch = NULL; } else { - RTC_LOG(INFO) << "nChannels=" << Wfx.Format.nChannels - << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec - << " is not supported. No closest match."; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels + << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec + << " is not supported. 
No closest match."; } } } diff --git a/modules/audio_device/win/audio_device_module_win.cc b/modules/audio_device/win/audio_device_module_win.cc index ad26953ef2..6643d8479e 100644 --- a/modules/audio_device/win/audio_device_module_win.cc +++ b/modules/audio_device/win/audio_device_module_win.cc @@ -95,12 +95,12 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { task_queue_factory_(task_queue_factory) { RTC_CHECK(input_); RTC_CHECK(output_); - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); } ~WindowsAudioDeviceModule() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); Terminate(); } @@ -110,7 +110,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t ActiveAudioLayer( AudioDeviceModule::AudioLayer* audioLayer) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); // TODO(henrika): it might be possible to remove this unique signature. *audioLayer = AudioDeviceModule::kWindowsCoreAudio2; @@ -118,14 +118,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t RegisterAudioCallback(AudioTransport* audioCallback) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(audio_device_buffer_); RTC_DCHECK_RUN_ON(&thread_checker_); return audio_device_buffer_->RegisterAudioCallback(audioCallback); } int32_t Init() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_INPUT_RESTARTS(0); @@ -153,7 +153,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t Terminate() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_INPUT_RESTARTS(0); @@ -172,14 +172,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int16_t PlayoutDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return output_->NumDevices(); } int16_t RecordingDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); return input_->NumDevices(); @@ -188,7 +188,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t PlayoutDeviceName(uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); std::string name_str, guid_str; @@ -205,7 +205,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t RecordingDeviceName(uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); std::string name_str, guid_str; @@ -221,7 +221,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t SetPlayoutDevice(uint16_t index) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return 
output_->SetDevice(index); @@ -229,33 +229,33 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SetPlayoutDevice( AudioDeviceModule::WindowsDeviceType device) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return output_->SetDevice(device); } int32_t SetRecordingDevice(uint16_t index) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->SetDevice(index); } int32_t SetRecordingDevice( AudioDeviceModule::WindowsDeviceType device) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->SetDevice(device); } int32_t PlayoutIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; } int32_t InitPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_OUTPUT_IS_INITIALIZED(0); @@ -263,21 +263,21 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } bool PlayoutIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(true); return output_->PlayoutIsInitialized(); } int32_t RecordingIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; } int32_t InitRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); RETURN_IF_INPUT_IS_INITIALIZED(0); @@ -285,14 +285,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } bool RecordingIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(true); return input_->RecordingIsInitialized(); } int32_t StartPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_OUTPUT_IS_ACTIVE(0); @@ -300,21 +300,21 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t StopPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(-1); return output_->StopPlayout(); } bool Playing() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(true); return output_->Playing(); } int32_t StartRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); RETURN_IF_INPUT_IS_ACTIVE(0); @@ -322,41 +322,41 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t StopRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(-1); return input_->StopRecording(); } bool Recording() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; 
RETURN_IF_INPUT_RESTARTS(true); return input_->Recording(); } int32_t InitSpeaker() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_ ? 0 : -1; } bool SpeakerIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_; } int32_t InitMicrophone() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_ ? 0 : -1; } bool MicrophoneIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_; @@ -364,7 +364,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SpeakerVolumeIsAvailable(bool* available) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = false; return 0; @@ -377,7 +377,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t MicrophoneVolumeIsAvailable(bool* available) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = false; return 0; @@ -398,7 +398,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t StereoPlayoutIsAvailable(bool* available) const override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; @@ -406,14 +406,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SetStereoPlayout(bool enable) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int32_t StereoPlayout(bool* enabled) const override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *enabled = true; return 0; @@ -421,7 +421,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t StereoRecordingIsAvailable(bool* available) const override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; @@ -429,14 +429,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SetStereoRecording(bool enable) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int32_t StereoRecording(bool* enabled) const override { // TODO(henrika): improve support. 
- RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *enabled = true; return 0; @@ -453,33 +453,33 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t EnableBuiltInNS(bool enable) override { return 0; } int32_t AttachAudioBuffer() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; output_->AttachAudioBuffer(audio_device_buffer_.get()); input_->AttachAudioBuffer(audio_device_buffer_.get()); return 0; } int RestartPlayoutInternally() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return output_->RestartPlayout(); } int RestartRecordingInternally() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->RestartRecording(); } int SetPlayoutSampleRate(uint32_t sample_rate) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return output_->SetSampleRate(sample_rate); } int SetRecordingSampleRate(uint32_t sample_rate) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->SetSampleRate(sample_rate); } @@ -513,7 +513,7 @@ CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput( std::unique_ptr audio_input, std::unique_ptr audio_output, TaskQueueFactory* task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return rtc::make_ref_counted( std::move(audio_input), std::move(audio_output), task_queue_factory); } diff --git a/modules/audio_device/win/core_audio_base_win.cc b/modules/audio_device/win/core_audio_base_win.cc index c42c091ed2..40645d5422 100644 --- a/modules/audio_device/win/core_audio_base_win.cc +++ b/modules/audio_device/win/core_audio_base_win.cc @@ -125,7 +125,7 @@ const char* SessionDisconnectReasonToString( bool IsLowLatencySupported(IAudioClient3* client3, const WAVEFORMATEXTENSIBLE* format, uint32_t* min_period_in_frames) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Get the range of periodicities supported by the engine for the specified // stream format. @@ -143,7 +143,7 @@ bool IsLowLatencySupported(IAudioClient3* client3, // default engine period. // TODO(henrika): verify that this assumption is correct. const bool low_latency = min_period < default_period; - RTC_LOG(INFO) << "low_latency: " << low_latency; + RTC_LOG(LS_INFO) << "low_latency: " << low_latency; *min_period_in_frames = low_latency ? min_period : 0; return low_latency; } @@ -161,9 +161,10 @@ CoreAudioBase::CoreAudioBase(Direction direction, on_error_callback_(error_callback), device_index_(kUndefined), is_restarting_(false) { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction) << "]"; - RTC_DLOG(INFO) << "Automatic restart: " << automatic_restart; - RTC_DLOG(INFO) << "Windows version: " << rtc::rtc_win::GetVersion(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction) + << "]"; + RTC_DLOG(LS_INFO) << "Automatic restart: " << automatic_restart; + RTC_DLOG(LS_INFO) << "Windows version: " << rtc::rtc_win::GetVersion(); // Create the event which the audio engine will signal each time a buffer // becomes ready to be processed by the client. 
@@ -181,7 +182,7 @@ CoreAudioBase::CoreAudioBase(Direction direction, } CoreAudioBase::~CoreAudioBase() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_EQ(ref_count_, 1); } @@ -207,7 +208,7 @@ int CoreAudioBase::NumberOfEnumeratedDevices() const { } void CoreAudioBase::ReleaseCOMObjects() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // ComPtr::Reset() sets the ComPtr to nullptr releasing any previous // reference. if (audio_client_) { @@ -288,15 +289,15 @@ std::string CoreAudioBase::GetDeviceID(int index) const { } int CoreAudioBase::SetDevice(int index) { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]: index=" << IndexToString(index); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]: index=" << IndexToString(index); if (initialized_) { return -1; } std::string device_id = GetDeviceID(index); - RTC_DLOG(INFO) << "index=" << IndexToString(index) - << " => device_id: " << device_id; + RTC_DLOG(LS_INFO) << "index=" << IndexToString(index) + << " => device_id: " << device_id; device_index_ = index; device_id_ = device_id; @@ -306,8 +307,8 @@ int CoreAudioBase::SetDevice(int index) { int CoreAudioBase::DeviceName(int index, std::string* name, std::string* guid) const { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]: index=" << IndexToString(index); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]: index=" << IndexToString(index); if (index > NumberOfEnumeratedDevices() - 1) { RTC_LOG(LS_ERROR) << "Invalid device index"; return -1; @@ -324,17 +325,17 @@ int CoreAudioBase::DeviceName(int index, } *name = device_names[index].device_name; - RTC_DLOG(INFO) << "name: " << *name; + RTC_DLOG(LS_INFO) << "name: " << *name; if (guid != nullptr) { *guid = device_names[index].unique_id; - RTC_DLOG(INFO) << "guid: " << *guid; + RTC_DLOG(LS_INFO) << "guid: " << *guid; } return 0; } bool CoreAudioBase::Init() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; RTC_DCHECK_GE(device_index_, 0); RTC_DCHECK(!device_id_.empty()); RTC_DCHECK(audio_device_buffer_); @@ -360,15 +361,15 @@ bool CoreAudioBase::Init() { // an audio stream between an audio application and the audio engine. ComPtr audio_client; if (core_audio_utility::GetAudioClientVersion() == 3) { - RTC_DLOG(INFO) << "Using IAudioClient3"; + RTC_DLOG(LS_INFO) << "Using IAudioClient3"; audio_client = core_audio_utility::CreateClient3(device_id, GetDataFlow(), role); } else if (core_audio_utility::GetAudioClientVersion() == 2) { - RTC_DLOG(INFO) << "Using IAudioClient2"; + RTC_DLOG(LS_INFO) << "Using IAudioClient2"; audio_client = core_audio_utility::CreateClient2(device_id, GetDataFlow(), role); } else { - RTC_DLOG(INFO) << "Using IAudioClient"; + RTC_DLOG(LS_INFO) << "Using IAudioClient"; audio_client = core_audio_utility::CreateClient(device_id, GetDataFlow(), role); } @@ -429,7 +430,7 @@ bool CoreAudioBase::Init() { format_.dwChannelMask = format->nChannels == 1 ? KSAUDIO_SPEAKER_MONO : KSAUDIO_SPEAKER_STEREO; format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; - RTC_DLOG(INFO) << core_audio_utility::WaveFormatToString(&format_); + RTC_DLOG(LS_INFO) << core_audio_utility::WaveFormatToString(&format_); // Verify that the format is supported but exclude the test if the default // sample rate has been overridden. 
If so, the WASAPI audio engine will do @@ -502,8 +503,8 @@ bool CoreAudioBase::Init() { 1000.0L; const int preferred_frames_per_buffer = static_cast(params.sample_rate() * device_period_in_seconds + 0.5); - RTC_DLOG(INFO) << "preferred_frames_per_buffer: " - << preferred_frames_per_buffer; + RTC_DLOG(LS_INFO) << "preferred_frames_per_buffer: " + << preferred_frames_per_buffer; if (preferred_frames_per_buffer % params.frames_per_buffer()) { RTC_LOG(WARNING) << "Buffer size of " << params.frames_per_buffer() << " is not an even divisor of " @@ -525,7 +526,7 @@ bool CoreAudioBase::Init() { if (FAILED(audio_session_control->GetState(&state))) { return false; } - RTC_DLOG(INFO) << "audio session state: " << SessionStateToString(state); + RTC_DLOG(LS_INFO) << "audio session state: " << SessionStateToString(state); RTC_DCHECK_EQ(state, AudioSessionStateInactive); // Register the client to receive notifications of session events, including @@ -542,8 +543,8 @@ bool CoreAudioBase::Init() { } bool CoreAudioBase::Start() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; if (IsRestarting()) { // Audio thread should be alive during internal restart since the restart // callback is triggered on that thread and it also makes the restart @@ -559,8 +560,8 @@ bool CoreAudioBase::Start() { audio_thread_ = rtc::PlatformThread::SpawnJoinable( [this] { ThreadRun(); }, name, rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime)); - RTC_DLOG(INFO) << "Started thread with name: " << name - << " and handle: " << *audio_thread_.GetHandle(); + RTC_DLOG(LS_INFO) << "Started thread with name: " << name + << " and handle: " << *audio_thread_.GetHandle(); } // Start streaming data between the endpoint buffer and the audio engine. @@ -579,9 +580,9 @@ bool CoreAudioBase::Start() { } bool CoreAudioBase::Stop() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; - RTC_DLOG(INFO) << "total activity time: " << TimeSinceStart(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; + RTC_DLOG(LS_INFO) << "total activity time: " << TimeSinceStart(); // Stop audio streaming. _com_error error = audio_client_->Stop(); @@ -614,8 +615,8 @@ bool CoreAudioBase::Stop() { // Delete the previous registration by the client to receive notifications // about audio session events. - RTC_DLOG(INFO) << "audio session state: " - << SessionStateToString(GetAudioSessionState()); + RTC_DLOG(LS_INFO) << "audio session state: " + << SessionStateToString(GetAudioSessionState()); error = audio_session_control_->UnregisterAudioSessionNotification(this); if (FAILED(error.Error())) { RTC_LOG(LS_ERROR) @@ -663,7 +664,7 @@ bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const { << core_audio_utility::ErrorToString(error); *available = false; } - RTC_DLOG(INFO) << "master volume for output audio session: " << volume; + RTC_DLOG(LS_INFO) << "master volume for output audio session: " << volume; *available = true; return false; @@ -674,8 +675,8 @@ bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const { // device notifications. Hence, the emulated restart sequence covers most parts // of a real sequence expect the actual device switch. 
bool CoreAudioBase::Restart() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; if (!automatic_restart()) { return false; } @@ -685,12 +686,12 @@ bool CoreAudioBase::Restart() { } void CoreAudioBase::StopThread() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(!IsRestarting()); if (!audio_thread_.empty()) { - RTC_DLOG(INFO) << "Sets stop_event..."; + RTC_DLOG(LS_INFO) << "Sets stop_event..."; SetEvent(stop_event_.Get()); - RTC_DLOG(INFO) << "PlatformThread::Finalize..."; + RTC_DLOG(LS_INFO) << "PlatformThread::Finalize..."; audio_thread_.Finalize(); // Ensure that we don't quit the main thread loop immediately next @@ -701,8 +702,8 @@ void CoreAudioBase::StopThread() { } bool CoreAudioBase::HandleRestartEvent() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(!audio_thread_.empty()); RTC_DCHECK(IsRestarting()); @@ -716,13 +717,13 @@ bool CoreAudioBase::HandleRestartEvent() { } bool CoreAudioBase::SwitchDeviceIfNeeded() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(IsRestarting()); - RTC_DLOG(INFO) << "device_index=" << device_index_ - << " => device_id: " << device_id_; + RTC_DLOG(LS_INFO) << "device_index=" << device_index_ + << " => device_id: " << device_id_; // Ensure that at least one device exists and can be utilized. The most // probable cause for ending up here is that a device has been removed. @@ -746,7 +747,7 @@ bool CoreAudioBase::SwitchDeviceIfNeeded() { return false; } } else { - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "Device configuration has not changed => keeping selected device"; } return true; @@ -766,14 +767,14 @@ AudioSessionState CoreAudioBase::GetAudioSessionState() const { // TODO(henrika): only used for debugging purposes currently. ULONG CoreAudioBase::AddRef() { ULONG new_ref = InterlockedIncrement(&ref_count_); - // RTC_DLOG(INFO) << "__AddRef => " << new_ref; + // RTC_DLOG(LS_INFO) << "__AddRef => " << new_ref; return new_ref; } // TODO(henrika): does not call delete this. ULONG CoreAudioBase::Release() { ULONG new_ref = InterlockedDecrement(&ref_count_); - // RTC_DLOG(INFO) << "__Release => " << new_ref; + // RTC_DLOG(LS_INFO) << "__Release => " << new_ref; return new_ref; } @@ -792,9 +793,9 @@ HRESULT CoreAudioBase::QueryInterface(REFIID iid, void** object) { // IAudioSessionEvents::OnStateChanged. HRESULT CoreAudioBase::OnStateChanged(AudioSessionState new_state) { - RTC_DLOG(INFO) << "___" << __FUNCTION__ << "[" - << DirectionToString(direction()) - << "] new_state: " << SessionStateToString(new_state); + RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "[" + << DirectionToString(direction()) + << "] new_state: " << SessionStateToString(new_state); return S_OK; } @@ -806,9 +807,9 @@ HRESULT CoreAudioBase::OnStateChanged(AudioSessionState new_state) { // same event. 
HRESULT CoreAudioBase::OnSessionDisconnected( AudioSessionDisconnectReason disconnect_reason) { - RTC_DLOG(INFO) << "___" << __FUNCTION__ << "[" - << DirectionToString(direction()) << "] reason: " - << SessionDisconnectReasonToString(disconnect_reason); + RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "[" + << DirectionToString(direction()) << "] reason: " + << SessionDisconnectReasonToString(disconnect_reason); // Ignore changes in the audio session (don't try to restart) if the user // has explicitly asked for this type of ADM during construction. if (!automatic_restart()) { @@ -869,8 +870,8 @@ void CoreAudioBase::ThreadRun() { RTC_LOG(LS_ERROR) << "MMCSS is not supported"; return; } - RTC_DLOG(INFO) << "[" << DirectionToString(direction()) - << "] ThreadRun starts..."; + RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction()) + << "] ThreadRun starts..."; // TODO(henrika): difference between "Pro Audio" and "Audio"? ScopedMMCSSRegistration mmcss_registration(L"Pro Audio"); ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA); @@ -938,8 +939,8 @@ void CoreAudioBase::ThreadRun() { // this stream should be destroyed instead of reused in the future. } - RTC_DLOG(INFO) << "[" << DirectionToString(direction()) - << "] ...ThreadRun stops"; + RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction()) + << "] ...ThreadRun stops"; } } // namespace webrtc_win diff --git a/modules/audio_device/win/core_audio_input_win.cc b/modules/audio_device/win/core_audio_input_win.cc index be4aec8c3f..6ad5f46c41 100644 --- a/modules/audio_device/win/core_audio_input_win.cc +++ b/modules/audio_device/win/core_audio_input_win.cc @@ -33,24 +33,24 @@ CoreAudioInput::CoreAudioInput(bool automatic_restart) automatic_restart, [this](uint64_t freq) { return OnDataCallback(freq); }, [this](ErrorType err) { return OnErrorCallback(err); }) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); thread_checker_audio_.Detach(); } CoreAudioInput::~CoreAudioInput() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); } int CoreAudioInput::Init() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int CoreAudioInput::Terminate() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); StopRecording(); return 0; @@ -62,17 +62,17 @@ int CoreAudioInput::NumDevices() const { } int CoreAudioInput::SetDevice(int index) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_GE(index, 0); RTC_DCHECK_RUN_ON(&thread_checker_); return CoreAudioBase::SetDevice(index); } int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " - << ((device == AudioDeviceModule::kDefaultDevice) - ? "Default" - : "DefaultCommunication"); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " + << ((device == AudioDeviceModule::kDefaultDevice) + ? "Default" + : "DefaultCommunication"); RTC_DCHECK_RUN_ON(&thread_checker_); return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 
0 : 1); } @@ -80,26 +80,26 @@ int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { int CoreAudioInput::DeviceName(int index, std::string* name, std::string* guid) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DCHECK(name); return CoreAudioBase::DeviceName(index, name, guid); } void CoreAudioInput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); audio_device_buffer_ = audio_buffer; } bool CoreAudioInput::RecordingIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << initialized_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_; RTC_DCHECK_RUN_ON(&thread_checker_); return initialized_; } int CoreAudioInput::InitRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(!initialized_); RTC_DCHECK(!Recording()); RTC_DCHECK(!audio_capture_client_); @@ -155,7 +155,7 @@ int CoreAudioInput::InitRecording() { } int CoreAudioInput::StartRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(!Recording()); RTC_DCHECK(fine_audio_buffer_); RTC_DCHECK(audio_device_buffer_); @@ -179,7 +179,7 @@ int CoreAudioInput::StartRecording() { } int CoreAudioInput::StopRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) { return 0; } @@ -214,7 +214,7 @@ int CoreAudioInput::StopRecording() { } bool CoreAudioInput::Recording() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << is_active_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_; return is_active_; } @@ -222,7 +222,7 @@ bool CoreAudioInput::Recording() { // are not compatible with the old ADM implementation since it allows accessing // the volume control with any active audio output stream. int CoreAudioInput::VolumeIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return IsVolumeControlAvailable(available) ? 0 : -1; } @@ -230,7 +230,7 @@ int CoreAudioInput::VolumeIsAvailable(bool* available) { // Triggers the restart sequence. Only used for testing purposes to emulate // a real event where e.g. an active input device is removed. 
int CoreAudioInput::RestartRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); if (!Recording()) { return 0; @@ -249,14 +249,14 @@ bool CoreAudioInput::Restarting() const { } int CoreAudioInput::SetSampleRate(uint32_t sample_rate) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); sample_rate_ = sample_rate; return 0; } void CoreAudioInput::ReleaseCOMObjects() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CoreAudioBase::ReleaseCOMObjects(); if (audio_capture_client_.Get()) { audio_capture_client_.Reset(); @@ -273,7 +273,7 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) { return false; } if (num_data_callbacks_ == 0) { - RTC_LOG(INFO) << "--- Input audio stream is alive ---"; + RTC_LOG(LS_INFO) << "--- Input audio stream is alive ---"; } UINT32 num_frames_in_next_packet = 0; _com_error error = @@ -328,7 +328,7 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) { } } if (num_data_callbacks_ % 500 == 0) { - RTC_DLOG(INFO) << "latency: " << latency_ms_; + RTC_DLOG(LS_INFO) << "latency: " << latency_ms_; } // The data in the packet is not correlated with the previous packet's @@ -382,7 +382,7 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) { } bool CoreAudioInput::OnErrorCallback(ErrorType error) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << as_integer(error); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error); RTC_DCHECK_RUN_ON(&thread_checker_audio_); if (error == CoreAudioBase::ErrorType::kStreamDisconnected) { HandleStreamDisconnected(); @@ -426,7 +426,7 @@ absl::optional CoreAudioInput::EstimateLatencyMillis( // safe. // TODO(henrika): add more details. 
bool CoreAudioInput::HandleStreamDisconnected() { - RTC_DLOG(INFO) << "<<<--- " << __FUNCTION__; + RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(automatic_restart()); @@ -445,7 +445,7 @@ bool CoreAudioInput::HandleStreamDisconnected() { return false; } - RTC_DLOG(INFO) << __FUNCTION__ << " --->>>"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>"; return true; } diff --git a/modules/audio_device/win/core_audio_output_win.cc b/modules/audio_device/win/core_audio_output_win.cc index bd4132a961..75229228ec 100644 --- a/modules/audio_device/win/core_audio_output_win.cc +++ b/modules/audio_device/win/core_audio_output_win.cc @@ -29,25 +29,25 @@ CoreAudioOutput::CoreAudioOutput(bool automatic_restart) automatic_restart, [this](uint64_t freq) { return OnDataCallback(freq); }, [this](ErrorType err) { return OnErrorCallback(err); }) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); thread_checker_audio_.Detach(); } CoreAudioOutput::~CoreAudioOutput() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); Terminate(); } int CoreAudioOutput::Init() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int CoreAudioOutput::Terminate() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); StopPlayout(); return 0; @@ -59,17 +59,17 @@ int CoreAudioOutput::NumDevices() const { } int CoreAudioOutput::SetDevice(int index) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_GE(index, 0); RTC_DCHECK_RUN_ON(&thread_checker_); return CoreAudioBase::SetDevice(index); } int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " - << ((device == AudioDeviceModule::kDefaultDevice) - ? "Default" - : "DefaultCommunication"); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " + << ((device == AudioDeviceModule::kDefaultDevice) + ? "Default" + : "DefaultCommunication"); RTC_DCHECK_RUN_ON(&thread_checker_); return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 
0 : 1); } @@ -77,26 +77,26 @@ int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { int CoreAudioOutput::DeviceName(int index, std::string* name, std::string* guid) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DCHECK(name); return CoreAudioBase::DeviceName(index, name, guid); } void CoreAudioOutput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); audio_device_buffer_ = audio_buffer; } bool CoreAudioOutput::PlayoutIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return initialized_; } int CoreAudioOutput::InitPlayout() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << IsRestarting(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting(); RTC_DCHECK(!initialized_); RTC_DCHECK(!Playing()); RTC_DCHECK(!audio_render_client_); @@ -150,7 +150,7 @@ int CoreAudioOutput::InitPlayout() { } int CoreAudioOutput::StartPlayout() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << IsRestarting(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting(); RTC_DCHECK(!Playing()); RTC_DCHECK(fine_audio_buffer_); RTC_DCHECK(audio_device_buffer_); @@ -180,7 +180,7 @@ int CoreAudioOutput::StartPlayout() { } int CoreAudioOutput::StopPlayout() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << IsRestarting(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting(); if (!initialized_) { return 0; } @@ -214,7 +214,7 @@ int CoreAudioOutput::StopPlayout() { } bool CoreAudioOutput::Playing() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << is_active_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_; return is_active_; } @@ -222,7 +222,7 @@ bool CoreAudioOutput::Playing() { // are not compatible with the old ADM implementation since it allows accessing // the volume control with any active audio output stream. int CoreAudioOutput::VolumeIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return IsVolumeControlAvailable(available) ? 0 : -1; } @@ -230,7 +230,7 @@ int CoreAudioOutput::VolumeIsAvailable(bool* available) { // Triggers the restart sequence. Only used for testing purposes to emulate // a real event where e.g. an active output device is removed. 
int CoreAudioOutput::RestartPlayout() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); if (!Playing()) { return 0; @@ -243,20 +243,20 @@ int CoreAudioOutput::RestartPlayout() { } bool CoreAudioOutput::Restarting() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return IsRestarting(); } int CoreAudioOutput::SetSampleRate(uint32_t sample_rate) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); sample_rate_ = sample_rate; return 0; } void CoreAudioOutput::ReleaseCOMObjects() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CoreAudioBase::ReleaseCOMObjects(); if (audio_render_client_.Get()) { audio_render_client_.Reset(); @@ -264,7 +264,7 @@ void CoreAudioOutput::ReleaseCOMObjects() { } bool CoreAudioOutput::OnErrorCallback(ErrorType error) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << as_integer(error); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error); RTC_DCHECK_RUN_ON(&thread_checker_audio_); if (!initialized_ || !Playing()) { return true; @@ -281,7 +281,7 @@ bool CoreAudioOutput::OnErrorCallback(ErrorType error) { bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) { RTC_DCHECK_RUN_ON(&thread_checker_audio_); if (num_data_callbacks_ == 0) { - RTC_LOG(INFO) << "--- Output audio stream is alive ---"; + RTC_LOG(LS_INFO) << "--- Output audio stream is alive ---"; } // Get the padding value which indicates the amount of valid unread data that // the endpoint buffer currently contains. @@ -329,7 +329,7 @@ bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) { // TODO(henrika): note that FineAudioBuffer adds latency as well. latency_ms_ = EstimateOutputLatencyMillis(device_frequency); if (num_data_callbacks_ % 500 == 0) { - RTC_DLOG(INFO) << "latency: " << latency_ms_; + RTC_DLOG(LS_INFO) << "latency: " << latency_ms_; } } @@ -394,7 +394,7 @@ int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) { // safe. // TODO(henrika): add more details. 
bool CoreAudioOutput::HandleStreamDisconnected() { - RTC_DLOG(INFO) << "<<<--- " << __FUNCTION__; + RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(automatic_restart()); @@ -413,7 +413,7 @@ bool CoreAudioOutput::HandleStreamDisconnected() { return false; } - RTC_DLOG(INFO) << __FUNCTION__ << " --->>>"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>"; return true; } diff --git a/modules/audio_device/win/core_audio_utility_win.cc b/modules/audio_device/win/core_audio_utility_win.cc index 5950c8dced..976edc8d01 100644 --- a/modules/audio_device/win/core_audio_utility_win.cc +++ b/modules/audio_device/win/core_audio_utility_win.cc @@ -205,7 +205,7 @@ bool LoadAudiosesDll() { L"%WINDIR%\\system32\\audioses.dll"; wchar_t path[MAX_PATH] = {0}; ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path)); - RTC_DLOG(INFO) << rtc::ToUtf8(path); + RTC_DLOG(LS_INFO) << rtc::ToUtf8(path); return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) != nullptr); } @@ -214,7 +214,7 @@ bool LoadAvrtDll() { static const wchar_t* const kAvrtDLL = L"%WINDIR%\\system32\\Avrt.dll"; wchar_t path[MAX_PATH] = {0}; ExpandEnvironmentStringsW(kAvrtDLL, path, arraysize(path)); - RTC_DLOG(INFO) << rtc::ToUtf8(path); + RTC_DLOG(LS_INFO) << rtc::ToUtf8(path); return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) != nullptr); } @@ -283,10 +283,10 @@ bool IsDeviceActive(IMMDevice* device) { ComPtr CreateDeviceInternal(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateDeviceInternal: " - "id=" - << device_id << ", flow=" << FlowToString(data_flow) - << ", role=" << RoleToString(role); + RTC_DLOG(LS_INFO) << "CreateDeviceInternal: " + "id=" + << device_id << ", flow=" << FlowToString(data_flow) + << ", role=" << RoleToString(role); ComPtr audio_endpoint_device; // Create the IMMDeviceEnumerator interface. @@ -587,10 +587,10 @@ bool GetDeviceNamesInternal(EDataFlow data_flow, // [2] friendly name: Headset Microphone (2- Arctis 7 Chat) // [2] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c} for (size_t i = 0; i < device_names->size(); ++i) { - RTC_DLOG(INFO) << "[" << i - << "] friendly name: " << (*device_names)[i].device_name; - RTC_DLOG(INFO) << "[" << i - << "] unique id : " << (*device_names)[i].unique_id; + RTC_DLOG(LS_INFO) << "[" << i + << "] friendly name: " << (*device_names)[i].device_name; + RTC_DLOG(LS_INFO) << "[" << i + << "] unique id : " << (*device_names)[i].unique_id; } return true; @@ -614,8 +614,8 @@ HRESULT GetPreferredAudioParametersInternal(IAudioClient* client, // Override default sample rate if `fixed_sample_rate` is set and different // from the default rate. if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) { - RTC_DLOG(INFO) << "Using fixed sample rate instead of the preferred: " - << sample_rate << " is replaced by " << fixed_sample_rate; + RTC_DLOG(LS_INFO) << "Using fixed sample rate instead of the preferred: " + << sample_rate << " is replaced by " << fixed_sample_rate; sample_rate = fixed_sample_rate; } // TODO(henrika): utilize full mix_format.Format.wBitsPerSample. 
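The fixed-sample-rate override a few lines up is only reachable through the second of the two public GetPreferredAudioParameters() overloads that appear later in this file; the first overload forwards -1. A hedged usage sketch, with namespace qualifiers omitted and audio_client assumed to be a valid ComPtr<IAudioClient> obtained from CreateClient():

// Usage sketch only; not code from this CL.
void SketchPreferredParams(Microsoft::WRL::ComPtr<IAudioClient> audio_client) {
  webrtc::AudioParameters params;
  // Engine-preferred parameters; internally forwards fixed_sample_rate = -1.
  HRESULT hr = GetPreferredAudioParameters(audio_client.Get(), &params);
  // Request 48 kHz instead; if that differs from the preferred rate, the
  // override is logged at LS_INFO as in the hunk above.
  hr = GetPreferredAudioParameters(audio_client.Get(), &params, 48000);
  (void)hr;
}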
@@ -634,7 +634,7 @@ HRESULT GetPreferredAudioParametersInternal(IAudioClient* client, AudioParameters audio_params(sample_rate, channels, frames_per_buffer); *params = audio_params; - RTC_DLOG(INFO) << audio_params.ToString(); + RTC_DLOG(LS_INFO) << audio_params.ToString(); return hr; } @@ -669,13 +669,13 @@ size_t WaveFormatWrapper::size() const { } bool IsSupported() { - RTC_DLOG(INFO) << "IsSupported"; + RTC_DLOG(LS_INFO) << "IsSupported"; static bool g_is_supported = IsSupportedInternal(); return g_is_supported; } bool IsMMCSSSupported() { - RTC_DLOG(INFO) << "IsMMCSSSupported"; + RTC_DLOG(LS_INFO) << "IsMMCSSSupported"; return LoadAvrtDll(); } @@ -698,7 +698,7 @@ int NumberOfActiveDevices(EDataFlow data_flow) { } else if (data_flow == eAll) { str = "Total number of devices: "; } - RTC_DLOG(INFO) << str << number_of_active_devices; + RTC_DLOG(LS_INFO) << str << number_of_active_devices; return static_cast(number_of_active_devices); } @@ -713,33 +713,33 @@ uint32_t GetAudioClientVersion() { } ComPtr CreateDeviceEnumerator() { - RTC_DLOG(INFO) << "CreateDeviceEnumerator"; + RTC_DLOG(LS_INFO) << "CreateDeviceEnumerator"; return CreateDeviceEnumeratorInternal(true); } std::string GetDefaultInputDeviceID() { - RTC_DLOG(INFO) << "GetDefaultInputDeviceID"; + RTC_DLOG(LS_INFO) << "GetDefaultInputDeviceID"; ComPtr device( CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole)); return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string(); } std::string GetDefaultOutputDeviceID() { - RTC_DLOG(INFO) << "GetDefaultOutputDeviceID"; + RTC_DLOG(LS_INFO) << "GetDefaultOutputDeviceID"; ComPtr device( CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eConsole)); return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string(); } std::string GetCommunicationsInputDeviceID() { - RTC_DLOG(INFO) << "GetCommunicationsInputDeviceID"; + RTC_DLOG(LS_INFO) << "GetCommunicationsInputDeviceID"; ComPtr device(CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eCommunications)); return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string(); } std::string GetCommunicationsOutputDeviceID() { - RTC_DLOG(INFO) << "GetCommunicationsOutputDeviceID"; + RTC_DLOG(LS_INFO) << "GetCommunicationsOutputDeviceID"; ComPtr device(CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eCommunications)); return device.Get() ? 
GetDeviceIdInternal(device.Get()) : std::string(); @@ -748,24 +748,24 @@ std::string GetCommunicationsOutputDeviceID() { ComPtr CreateDevice(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateDevice"; + RTC_DLOG(LS_INFO) << "CreateDevice"; return CreateDeviceInternal(device_id, data_flow, role); } AudioDeviceName GetDeviceName(IMMDevice* device) { - RTC_DLOG(INFO) << "GetDeviceName"; + RTC_DLOG(LS_INFO) << "GetDeviceName"; RTC_DCHECK(device); AudioDeviceName device_name(GetDeviceFriendlyNameInternal(device), GetDeviceIdInternal(device)); - RTC_DLOG(INFO) << "friendly name: " << device_name.device_name; - RTC_DLOG(INFO) << "unique id : " << device_name.unique_id; + RTC_DLOG(LS_INFO) << "friendly name: " << device_name.device_name; + RTC_DLOG(LS_INFO) << "unique id : " << device_name.unique_id; return device_name; } std::string GetFriendlyName(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "GetFriendlyName"; + RTC_DLOG(LS_INFO) << "GetFriendlyName"; ComPtr audio_device = CreateDevice(device_id, data_flow, role); if (!audio_device.Get()) return std::string(); @@ -775,7 +775,7 @@ std::string GetFriendlyName(const std::string& device_id, } EDataFlow GetDataFlow(IMMDevice* device) { - RTC_DLOG(INFO) << "GetDataFlow"; + RTC_DLOG(LS_INFO) << "GetDataFlow"; RTC_DCHECK(device); ComPtr endpoint; _com_error error = device->QueryInterface(endpoint.GetAddressOf()); @@ -796,32 +796,32 @@ EDataFlow GetDataFlow(IMMDevice* device) { } bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names) { - RTC_DLOG(INFO) << "GetInputDeviceNames"; + RTC_DLOG(LS_INFO) << "GetInputDeviceNames"; RTC_DCHECK(device_names); RTC_DCHECK(device_names->empty()); return GetDeviceNamesInternal(eCapture, device_names); } bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) { - RTC_DLOG(INFO) << "GetOutputDeviceNames"; + RTC_DLOG(LS_INFO) << "GetOutputDeviceNames"; RTC_DCHECK(device_names); RTC_DCHECK(device_names->empty()); return GetDeviceNamesInternal(eRender, device_names); } ComPtr CreateSessionManager2(IMMDevice* device) { - RTC_DLOG(INFO) << "CreateSessionManager2"; + RTC_DLOG(LS_INFO) << "CreateSessionManager2"; return CreateSessionManager2Internal(device); } Microsoft::WRL::ComPtr CreateSessionEnumerator( IMMDevice* device) { - RTC_DLOG(INFO) << "CreateSessionEnumerator"; + RTC_DLOG(LS_INFO) << "CreateSessionEnumerator"; return CreateSessionEnumeratorInternal(device); } int NumberOfActiveSessions(IMMDevice* device) { - RTC_DLOG(INFO) << "NumberOfActiveSessions"; + RTC_DLOG(LS_INFO) << "NumberOfActiveSessions"; ComPtr session_enumerator = CreateSessionEnumerator(device); @@ -833,7 +833,7 @@ int NumberOfActiveSessions(IMMDevice* device) { << ErrorToString(error); return 0; } - RTC_DLOG(INFO) << "Total number of audio sessions: " << session_count; + RTC_DLOG(LS_INFO) << "Total number of audio sessions: " << session_count; int num_active = 0; for (int session = 0; session < session_count; session++) { @@ -849,8 +849,8 @@ int NumberOfActiveSessions(IMMDevice* device) { // Log the display name of the audio session for debugging purposes. 
LPWSTR display_name; if (SUCCEEDED(session_control->GetDisplayName(&display_name))) { - RTC_DLOG(INFO) << "display name: " - << rtc::ToUtf8(display_name, wcslen(display_name)); + RTC_DLOG(LS_INFO) << "display name: " + << rtc::ToUtf8(display_name, wcslen(display_name)); CoTaskMemFree(display_name); } @@ -867,14 +867,14 @@ int NumberOfActiveSessions(IMMDevice* device) { } } - RTC_DLOG(INFO) << "Number of active audio sessions: " << num_active; + RTC_DLOG(LS_INFO) << "Number of active audio sessions: " << num_active; return num_active; } ComPtr CreateClient(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateClient"; + RTC_DLOG(LS_INFO) << "CreateClient"; ComPtr device(CreateDevice(device_id, data_flow, role)); return CreateClientInternal(device.Get()); } @@ -882,7 +882,7 @@ ComPtr CreateClient(const std::string& device_id, ComPtr CreateClient2(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateClient2"; + RTC_DLOG(LS_INFO) << "CreateClient2"; ComPtr device(CreateDevice(device_id, data_flow, role)); return CreateClient2Internal(device.Get()); } @@ -890,13 +890,13 @@ ComPtr CreateClient2(const std::string& device_id, ComPtr CreateClient3(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateClient3"; + RTC_DLOG(LS_INFO) << "CreateClient3"; ComPtr device(CreateDevice(device_id, data_flow, role)); return CreateClient3Internal(device.Get()); } HRESULT SetClientProperties(IAudioClient2* client) { - RTC_DLOG(INFO) << "SetClientProperties"; + RTC_DLOG(LS_INFO) << "SetClientProperties"; RTC_DCHECK(client); if (GetAudioClientVersion() < 2) { RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher"; @@ -919,10 +919,10 @@ HRESULT SetClientProperties(IAudioClient2* client) { RTC_LOG(LS_ERROR) << "IAudioClient2::IsOffloadCapable failed: " << ErrorToString(error); } - RTC_DLOG(INFO) << "supports_offload: " << supports_offload; + RTC_DLOG(LS_INFO) << "supports_offload: " << supports_offload; props.bIsOffload = false; #if (NTDDI_VERSION < NTDDI_WINBLUE) - RTC_DLOG(INFO) << "options: Not supported in this build"; + RTC_DLOG(LS_INFO) << "options: Not supported in this build"; #else // TODO(henrika): pros and cons compared with AUDCLNT_STREAMOPTIONS_NONE? props.Options |= AUDCLNT_STREAMOPTIONS_NONE; @@ -939,7 +939,7 @@ HRESULT SetClientProperties(IAudioClient2* client) { // an appropriate interface to use for communications scenarios. // This interface is mainly meant for pro audio scenarios. 
// props.Options |= AUDCLNT_STREAMOPTIONS_MATCH_FORMAT; - RTC_DLOG(INFO) << "options: 0x" << rtc::ToHex(props.Options); + RTC_DLOG(LS_INFO) << "options: 0x" << rtc::ToHex(props.Options); #endif error = client->SetClientProperties(&props); if (FAILED(error.Error())) { @@ -953,7 +953,7 @@ HRESULT GetBufferSizeLimits(IAudioClient2* client, const WAVEFORMATEXTENSIBLE* format, REFERENCE_TIME* min_buffer_duration, REFERENCE_TIME* max_buffer_duration) { - RTC_DLOG(INFO) << "GetBufferSizeLimits"; + RTC_DLOG(LS_INFO) << "GetBufferSizeLimits"; RTC_DCHECK(client); if (GetAudioClientVersion() < 2) { RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher"; @@ -975,15 +975,15 @@ HRESULT GetBufferSizeLimits(IAudioClient2* client, } else { *min_buffer_duration = min_duration; *max_buffer_duration = max_duration; - RTC_DLOG(INFO) << "min_buffer_duration: " << min_buffer_duration; - RTC_DLOG(INFO) << "max_buffer_duration: " << max_buffer_duration; + RTC_DLOG(LS_INFO) << "min_buffer_duration: " << min_buffer_duration; + RTC_DLOG(LS_INFO) << "max_buffer_duration: " << max_buffer_duration; } return error.Error(); } HRESULT GetSharedModeMixFormat(IAudioClient* client, WAVEFORMATEXTENSIBLE* format) { - RTC_DLOG(INFO) << "GetSharedModeMixFormat"; + RTC_DLOG(LS_INFO) << "GetSharedModeMixFormat"; RTC_DCHECK(client); // The GetMixFormat method retrieves the stream format that the audio engine @@ -1030,7 +1030,7 @@ HRESULT GetSharedModeMixFormat(IAudioClient* client, // the returned structure is correctly extended or not. RTC_CHECK_LE(wrapped_format.size(), sizeof(WAVEFORMATEXTENSIBLE)); memcpy(format, wrapped_format.get(), wrapped_format.size()); - RTC_DLOG(INFO) << WaveFormatToString(format); + RTC_DLOG(LS_INFO) << WaveFormatToString(format); return error.Error(); } @@ -1038,7 +1038,7 @@ HRESULT GetSharedModeMixFormat(IAudioClient* client, bool IsFormatSupported(IAudioClient* client, AUDCLNT_SHAREMODE share_mode, const WAVEFORMATEXTENSIBLE* format) { - RTC_DLOG(INFO) << "IsFormatSupported"; + RTC_DLOG(LS_INFO) << "IsFormatSupported"; RTC_DCHECK(client); ScopedCoMem closest_match; // This method provides a way for a client to determine, before calling @@ -1049,23 +1049,23 @@ bool IsFormatSupported(IAudioClient* client, _com_error error = client->IsFormatSupported( share_mode, reinterpret_cast(format), &closest_match); - RTC_LOG(INFO) << WaveFormatToString( + RTC_LOG(LS_INFO) << WaveFormatToString( const_cast(format)); if ((error.Error() == S_OK) && (closest_match == nullptr)) { - RTC_DLOG(INFO) + RTC_DLOG(LS_INFO) << "The audio endpoint device supports the specified stream format"; } else if ((error.Error() == S_FALSE) && (closest_match != nullptr)) { // Call succeeded with a closest match to the specified format. This log can // only be triggered for shared mode. RTC_LOG(LS_WARNING) << "Exact format is not supported, but a closest match exists"; - RTC_LOG(INFO) << WaveFormatToString(closest_match.Get()); + RTC_LOG(LS_INFO) << WaveFormatToString(closest_match.Get()); } else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) && (closest_match == nullptr)) { // The audio engine does not support the caller-specified format or any // similar format. 
- RTC_DLOG(INFO) << "The audio endpoint device does not support the " - "specified stream format"; + RTC_DLOG(LS_INFO) << "The audio endpoint device does not support the " + "specified stream format"; } else { RTC_LOG(LS_ERROR) << "IAudioClient::IsFormatSupported failed: " << ErrorToString(error); @@ -1077,7 +1077,7 @@ bool IsFormatSupported(IAudioClient* client, HRESULT GetDevicePeriod(IAudioClient* client, AUDCLNT_SHAREMODE share_mode, REFERENCE_TIME* device_period) { - RTC_DLOG(INFO) << "GetDevicePeriod"; + RTC_DLOG(LS_INFO) << "GetDevicePeriod"; RTC_DCHECK(client); // The `default_period` parameter specifies the default scheduling period // for a shared-mode stream. The `minimum_period` parameter specifies the @@ -1094,10 +1094,10 @@ HRESULT GetDevicePeriod(IAudioClient* client, *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period : minimum_period; - RTC_LOG(INFO) << "device_period: " - << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]"; - RTC_LOG(INFO) << "minimum_period: " - << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]"; + RTC_LOG(LS_INFO) << "device_period: " + << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]"; + RTC_LOG(LS_INFO) << "minimum_period: " + << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]"; return error.Error(); } @@ -1107,7 +1107,7 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, uint32_t* fundamental_period_in_frames, uint32_t* min_period_in_frames, uint32_t* max_period_in_frames) { - RTC_DLOG(INFO) << "GetSharedModeEnginePeriod"; + RTC_DLOG(LS_INFO) << "GetSharedModeEnginePeriod"; RTC_DCHECK(client3); UINT32 default_period = 0; @@ -1125,15 +1125,17 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, WAVEFORMATEX format_ex = format->Format; const WORD sample_rate = format_ex.nSamplesPerSec; - RTC_LOG(INFO) << "default_period_in_frames: " << default_period << " (" - << FramesToMilliseconds(default_period, sample_rate) << " ms)"; - RTC_LOG(INFO) << "fundamental_period_in_frames: " << fundamental_period - << " (" << FramesToMilliseconds(fundamental_period, sample_rate) - << " ms)"; - RTC_LOG(INFO) << "min_period_in_frames: " << min_period << " (" - << FramesToMilliseconds(min_period, sample_rate) << " ms)"; - RTC_LOG(INFO) << "max_period_in_frames: " << max_period << " (" - << FramesToMilliseconds(max_period, sample_rate) << " ms)"; + RTC_LOG(LS_INFO) << "default_period_in_frames: " << default_period << " (" + << FramesToMilliseconds(default_period, sample_rate) + << " ms)"; + RTC_LOG(LS_INFO) << "fundamental_period_in_frames: " << fundamental_period + << " (" + << FramesToMilliseconds(fundamental_period, sample_rate) + << " ms)"; + RTC_LOG(LS_INFO) << "min_period_in_frames: " << min_period << " (" + << FramesToMilliseconds(min_period, sample_rate) << " ms)"; + RTC_LOG(LS_INFO) << "max_period_in_frames: " << max_period << " (" + << FramesToMilliseconds(max_period, sample_rate) << " ms)"; *default_period_in_frames = default_period; *fundamental_period_in_frames = fundamental_period; *min_period_in_frames = min_period; @@ -1143,7 +1145,7 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, HRESULT GetPreferredAudioParameters(IAudioClient* client, AudioParameters* params) { - RTC_DLOG(INFO) << "GetPreferredAudioParameters"; + RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters"; RTC_DCHECK(client); return GetPreferredAudioParametersInternal(client, params, -1); } @@ -1151,7 +1153,7 @@ HRESULT GetPreferredAudioParameters(IAudioClient* client, HRESULT 
GetPreferredAudioParameters(IAudioClient* client, webrtc::AudioParameters* params, uint32_t sample_rate) { - RTC_DLOG(INFO) << "GetPreferredAudioParameters: " << sample_rate; + RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters: " << sample_rate; RTC_DCHECK(client); return GetPreferredAudioParametersInternal(client, params, sample_rate); } @@ -1162,8 +1164,9 @@ HRESULT SharedModeInitialize(IAudioClient* client, REFERENCE_TIME buffer_duration, bool auto_convert_pcm, uint32_t* endpoint_buffer_size) { - RTC_DLOG(INFO) << "SharedModeInitialize: buffer_duration=" << buffer_duration - << ", auto_convert_pcm=" << auto_convert_pcm; + RTC_DLOG(LS_INFO) << "SharedModeInitialize: buffer_duration=" + << buffer_duration + << ", auto_convert_pcm=" << auto_convert_pcm; RTC_DCHECK(client); RTC_DCHECK_GE(buffer_duration, 0); if (buffer_duration != 0) { @@ -1188,7 +1191,7 @@ HRESULT SharedModeInitialize(IAudioClient* client, (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE); if (use_event) { stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK; - RTC_DLOG(INFO) << "The stream is initialized to be event driven"; + RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven"; } // Check if sample-rate conversion is requested. @@ -1197,11 +1200,11 @@ HRESULT SharedModeInitialize(IAudioClient* client, // from our (the client's) format to the audio engine mix format. // Currently only supported for testing, i.e., not possible to enable using // public APIs. - RTC_DLOG(INFO) << "The stream is initialized to support rate conversion"; + RTC_DLOG(LS_INFO) << "The stream is initialized to support rate conversion"; stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM; stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY; } - RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); + RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); // Initialize the shared mode client for minimal delay if `buffer_duration` // is 0 or possibly a higher delay (more robust) if `buffer_duration` is @@ -1243,23 +1246,23 @@ HRESULT SharedModeInitialize(IAudioClient* client, } *endpoint_buffer_size = buffer_size_in_frames; - RTC_DLOG(INFO) << "endpoint buffer size: " << buffer_size_in_frames - << " [audio frames]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames + << " [audio frames]"; const double size_in_ms = static_cast(buffer_size_in_frames) / (format->Format.nSamplesPerSec / 1000.0); - RTC_DLOG(INFO) << "endpoint buffer size: " - << static_cast(size_in_ms + 0.5) << " [ms]"; - RTC_DLOG(INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; - RTC_DLOG(INFO) << "endpoint buffer size: " - << buffer_size_in_frames * format->Format.nChannels * - (format->Format.wBitsPerSample / 8) - << " [bytes]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << static_cast(size_in_ms + 0.5) << " [ms]"; + RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << buffer_size_in_frames * format->Format.nChannels * + (format->Format.wBitsPerSample / 8) + << " [bytes]"; // TODO(henrika): utilize when delay measurements are added. 
REFERENCE_TIME latency = 0; error = client->GetStreamLatency(&latency); - RTC_DLOG(INFO) << "stream latency: " << ReferenceTimeToTimeDelta(latency).ms() - << " [ms]"; + RTC_DLOG(LS_INFO) << "stream latency: " + << ReferenceTimeToTimeDelta(latency).ms() << " [ms]"; return error.Error(); } @@ -1269,9 +1272,9 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, uint32_t period_in_frames, bool auto_convert_pcm, uint32_t* endpoint_buffer_size) { - RTC_DLOG(INFO) << "SharedModeInitializeLowLatency: period_in_frames=" - << period_in_frames - << ", auto_convert_pcm=" << auto_convert_pcm; + RTC_DLOG(LS_INFO) << "SharedModeInitializeLowLatency: period_in_frames=" + << period_in_frames + << ", auto_convert_pcm=" << auto_convert_pcm; RTC_DCHECK(client); RTC_DCHECK_GT(period_in_frames, 0); if (auto_convert_pcm) { @@ -1284,13 +1287,13 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE); if (use_event) { stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK; - RTC_DLOG(INFO) << "The stream is initialized to be event driven"; + RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven"; } if (auto_convert_pcm) { stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM; stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY; } - RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); + RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); // Initialize the shared mode client for lowest possible latency. // It is assumed that GetSharedModeEnginePeriod() has been used to query the @@ -1324,17 +1327,17 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, } *endpoint_buffer_size = buffer_size_in_frames; - RTC_DLOG(INFO) << "endpoint buffer size: " << buffer_size_in_frames - << " [audio frames]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames + << " [audio frames]"; const double size_in_ms = static_cast(buffer_size_in_frames) / (format->Format.nSamplesPerSec / 1000.0); - RTC_DLOG(INFO) << "endpoint buffer size: " - << static_cast(size_in_ms + 0.5) << " [ms]"; - RTC_DLOG(INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; - RTC_DLOG(INFO) << "endpoint buffer size: " - << buffer_size_in_frames * format->Format.nChannels * - (format->Format.wBitsPerSample / 8) - << " [bytes]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << static_cast(size_in_ms + 0.5) << " [ms]"; + RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << buffer_size_in_frames * format->Format.nChannels * + (format->Format.wBitsPerSample / 8) + << " [bytes]"; // TODO(henrika): utilize when delay measurements are added. REFERENCE_TIME latency = 0; @@ -1343,14 +1346,14 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, RTC_LOG(LS_WARNING) << "IAudioClient::GetStreamLatency failed: " << ErrorToString(error); } else { - RTC_DLOG(INFO) << "stream latency: " - << ReferenceTimeToTimeDelta(latency).ms() << " [ms]"; + RTC_DLOG(LS_INFO) << "stream latency: " + << ReferenceTimeToTimeDelta(latency).ms() << " [ms]"; } return error.Error(); } ComPtr CreateRenderClient(IAudioClient* client) { - RTC_DLOG(INFO) << "CreateRenderClient"; + RTC_DLOG(LS_INFO) << "CreateRenderClient"; RTC_DCHECK(client); // Get access to the IAudioRenderClient interface. This interface // enables us to write output data to a rendering endpoint buffer. 
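The render client created here is what FillRenderEndpointBufferWithSilence(), further down in this file, drives. A condensed sketch of that standard WASAPI silence-fill pattern is shown below; error logging is stripped, the helper name is made up, and <audioclient.h> plus the usual Windows headers are assumed to be included.

// Condensed sketch of the pattern; the real helper below adds RTC logging.
bool SketchFillWithSilence(IAudioClient* client,
                           IAudioRenderClient* render_client) {
  UINT32 buffer_size = 0;  // total endpoint buffer size in audio frames
  if (FAILED(client->GetBufferSize(&buffer_size)))
    return false;
  UINT32 num_queued_frames = 0;  // frames written but not yet consumed
  if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
    return false;
  const UINT32 num_frames_to_fill = buffer_size - num_queued_frames;
  BYTE* data = nullptr;
  if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
    return false;
  // Flagging the region as silent means `data` does not have to be zeroed.
  return SUCCEEDED(render_client->ReleaseBuffer(num_frames_to_fill,
                                                AUDCLNT_BUFFERFLAGS_SILENT));
}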
@@ -1366,7 +1369,7 @@ ComPtr CreateRenderClient(IAudioClient* client) { } ComPtr CreateCaptureClient(IAudioClient* client) { - RTC_DLOG(INFO) << "CreateCaptureClient"; + RTC_DLOG(LS_INFO) << "CreateCaptureClient"; RTC_DCHECK(client); // Get access to the IAudioCaptureClient interface. This interface // enables us to read input data from a capturing endpoint buffer. @@ -1382,7 +1385,7 @@ ComPtr CreateCaptureClient(IAudioClient* client) { } ComPtr CreateAudioClock(IAudioClient* client) { - RTC_DLOG(INFO) << "CreateAudioClock"; + RTC_DLOG(LS_INFO) << "CreateAudioClock"; RTC_DCHECK(client); // Get access to the IAudioClock interface. This interface enables us to // monitor a stream's data rate and the current position in the stream. @@ -1397,7 +1400,7 @@ ComPtr CreateAudioClock(IAudioClient* client) { } ComPtr CreateAudioSessionControl(IAudioClient* client) { - RTC_DLOG(INFO) << "CreateAudioSessionControl"; + RTC_DLOG(LS_INFO) << "CreateAudioSessionControl"; RTC_DCHECK(client); ComPtr audio_session_control; _com_error error = client->GetService(IID_PPV_ARGS(&audio_session_control)); @@ -1410,7 +1413,7 @@ ComPtr CreateAudioSessionControl(IAudioClient* client) { } ComPtr CreateSimpleAudioVolume(IAudioClient* client) { - RTC_DLOG(INFO) << "CreateSimpleAudioVolume"; + RTC_DLOG(LS_INFO) << "CreateSimpleAudioVolume"; RTC_DCHECK(client); // Get access to the ISimpleAudioVolume interface. This interface enables a // client to control the master volume level of an audio session. @@ -1427,7 +1430,7 @@ ComPtr CreateSimpleAudioVolume(IAudioClient* client) { bool FillRenderEndpointBufferWithSilence(IAudioClient* client, IAudioRenderClient* render_client) { - RTC_DLOG(INFO) << "FillRenderEndpointBufferWithSilence"; + RTC_DLOG(LS_INFO) << "FillRenderEndpointBufferWithSilence"; RTC_DCHECK(client); RTC_DCHECK(render_client); UINT32 endpoint_buffer_size = 0; @@ -1447,11 +1450,11 @@ bool FillRenderEndpointBufferWithSilence(IAudioClient* client, << ErrorToString(error); return false; } - RTC_DLOG(INFO) << "num_queued_frames: " << num_queued_frames; + RTC_DLOG(LS_INFO) << "num_queued_frames: " << num_queued_frames; BYTE* data = nullptr; int num_frames_to_fill = endpoint_buffer_size - num_queued_frames; - RTC_DLOG(INFO) << "num_frames_to_fill: " << num_frames_to_fill; + RTC_DLOG(LS_INFO) << "num_frames_to_fill: " << num_frames_to_fill; error = render_client->GetBuffer(num_frames_to_fill, &data); if (FAILED(error.Error())) { RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: " diff --git a/modules/audio_device/win/core_audio_utility_win.h b/modules/audio_device/win/core_audio_utility_win.h index 95ed91176d..afadee5635 100644 --- a/modules/audio_device/win/core_audio_utility_win.h +++ b/modules/audio_device/win/core_audio_utility_win.h @@ -83,7 +83,7 @@ class ScopedMMCSSRegistration { } explicit ScopedMMCSSRegistration(const wchar_t* task_name) { - RTC_DLOG(INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name); + RTC_DLOG(LS_INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name); // Register the calling thread with MMCSS for the supplied `task_name`. 
DWORD mmcss_task_index = 0; mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index); @@ -93,18 +93,18 @@ class ScopedMMCSSRegistration { } else { const DWORD priority_class = GetPriorityClass(GetCurrentProcess()); const int priority = GetThreadPriority(GetCurrentThread()); - RTC_DLOG(INFO) << "priority class: " - << PriorityClassToString(priority_class) << "(" - << priority_class << ")"; - RTC_DLOG(INFO) << "priority: " << PriorityToString(priority) << "(" - << priority << ")"; + RTC_DLOG(LS_INFO) << "priority class: " + << PriorityClassToString(priority_class) << "(" + << priority_class << ")"; + RTC_DLOG(LS_INFO) << "priority: " << PriorityToString(priority) << "(" + << priority << ")"; } } ~ScopedMMCSSRegistration() { if (Succeeded()) { // Deregister with MMCSS. - RTC_DLOG(INFO) << "~ScopedMMCSSRegistration"; + RTC_DLOG(LS_INFO) << "~ScopedMMCSSRegistration"; AvRevertMmThreadCharacteristics(mmcss_handle_); } } diff --git a/modules/utility/source/jvm_android.cc b/modules/utility/source/jvm_android.cc index 8e24daa0f2..ee9930bcaa 100644 --- a/modules/utility/source/jvm_android.cc +++ b/modules/utility/source/jvm_android.cc @@ -38,10 +38,10 @@ struct { // stack. Consequently, we only look up all classes once in native WebRTC. // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass void LoadClasses(JNIEnv* jni) { - RTC_LOG(INFO) << "LoadClasses:"; + RTC_LOG(LS_INFO) << "LoadClasses:"; for (auto& c : loaded_classes) { jclass localRef = FindClass(jni, c.name); - RTC_LOG(INFO) << "name: " << c.name; + RTC_LOG(LS_INFO) << "name: " << c.name; CHECK_EXCEPTION(jni) << "Error during FindClass: " << c.name; RTC_CHECK(localRef) << c.name; jclass globalRef = reinterpret_cast(jni->NewGlobalRef(localRef)); @@ -69,12 +69,12 @@ jclass LookUpClass(const char* name) { // JvmThreadConnector implementation. JvmThreadConnector::JvmThreadConnector() : attached_(false) { - RTC_LOG(INFO) << "JvmThreadConnector::ctor"; + RTC_LOG(LS_INFO) << "JvmThreadConnector::ctor"; JavaVM* jvm = JVM::GetInstance()->jvm(); RTC_CHECK(jvm); JNIEnv* jni = GetEnv(jvm); if (!jni) { - RTC_LOG(INFO) << "Attaching thread to JVM"; + RTC_LOG(LS_INFO) << "Attaching thread to JVM"; JNIEnv* env = nullptr; jint ret = jvm->AttachCurrentThread(&env, nullptr); attached_ = (ret == JNI_OK); @@ -82,10 +82,10 @@ JvmThreadConnector::JvmThreadConnector() : attached_(false) { } JvmThreadConnector::~JvmThreadConnector() { - RTC_LOG(INFO) << "JvmThreadConnector::dtor"; + RTC_LOG(LS_INFO) << "JvmThreadConnector::dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); if (attached_) { - RTC_LOG(INFO) << "Detaching thread from JVM"; + RTC_LOG(LS_INFO) << "Detaching thread from JVM"; jint res = JVM::GetInstance()->jvm()->DetachCurrentThread(); RTC_CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res; } @@ -94,11 +94,11 @@ JvmThreadConnector::~JvmThreadConnector() { // GlobalRef implementation. GlobalRef::GlobalRef(JNIEnv* jni, jobject object) : jni_(jni), j_object_(NewGlobalRef(jni, object)) { - RTC_LOG(INFO) << "GlobalRef::ctor"; + RTC_LOG(LS_INFO) << "GlobalRef::ctor"; } GlobalRef::~GlobalRef() { - RTC_LOG(INFO) << "GlobalRef::dtor"; + RTC_LOG(LS_INFO) << "GlobalRef::dtor"; DeleteGlobalRef(jni_, j_object_); } @@ -131,11 +131,11 @@ void GlobalRef::CallVoidMethod(jmethodID methodID, ...) { // NativeRegistration implementation. 
NativeRegistration::NativeRegistration(JNIEnv* jni, jclass clazz) : JavaClass(jni, clazz), jni_(jni) { - RTC_LOG(INFO) << "NativeRegistration::ctor"; + RTC_LOG(LS_INFO) << "NativeRegistration::ctor"; } NativeRegistration::~NativeRegistration() { - RTC_LOG(INFO) << "NativeRegistration::dtor"; + RTC_LOG(LS_INFO) << "NativeRegistration::dtor"; jni_->UnregisterNatives(j_class_); CHECK_EXCEPTION(jni_) << "Error during UnregisterNatives"; } @@ -143,7 +143,7 @@ NativeRegistration::~NativeRegistration() { std::unique_ptr NativeRegistration::NewObject(const char* name, const char* signature, ...) { - RTC_LOG(INFO) << "NativeRegistration::NewObject"; + RTC_LOG(LS_INFO) << "NativeRegistration::NewObject"; va_list args; va_start(args, signature); jobject obj = jni_->NewObjectV( @@ -181,11 +181,11 @@ jint JavaClass::CallStaticIntMethod(jmethodID methodID, ...) { // JNIEnvironment implementation. JNIEnvironment::JNIEnvironment(JNIEnv* jni) : jni_(jni) { - RTC_LOG(INFO) << "JNIEnvironment::ctor"; + RTC_LOG(LS_INFO) << "JNIEnvironment::ctor"; } JNIEnvironment::~JNIEnvironment() { - RTC_LOG(INFO) << "JNIEnvironment::dtor"; + RTC_LOG(LS_INFO) << "JNIEnvironment::dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); } @@ -193,7 +193,7 @@ std::unique_ptr JNIEnvironment::RegisterNatives( const char* name, const JNINativeMethod* methods, int num_methods) { - RTC_LOG(INFO) << "JNIEnvironment::RegisterNatives: " << name; + RTC_LOG(LS_INFO) << "JNIEnvironment::RegisterNatives: " << name; RTC_DCHECK(thread_checker_.IsCurrent()); jclass clazz = LookUpClass(name); jni_->RegisterNatives(clazz, methods, num_methods); @@ -216,7 +216,7 @@ std::string JNIEnvironment::JavaToStdString(const jstring& j_string) { // static void JVM::Initialize(JavaVM* jvm) { - RTC_LOG(INFO) << "JVM::Initialize"; + RTC_LOG(LS_INFO) << "JVM::Initialize"; RTC_CHECK(!g_jvm); g_jvm = new JVM(jvm); } @@ -234,7 +234,7 @@ void JVM::Initialize(JavaVM* jvm, jobject context) { // static void JVM::Uninitialize() { - RTC_LOG(INFO) << "JVM::Uninitialize"; + RTC_LOG(LS_INFO) << "JVM::Uninitialize"; RTC_DCHECK(g_jvm); delete g_jvm; g_jvm = nullptr; @@ -247,19 +247,19 @@ JVM* JVM::GetInstance() { } JVM::JVM(JavaVM* jvm) : jvm_(jvm) { - RTC_LOG(INFO) << "JVM::JVM"; + RTC_LOG(LS_INFO) << "JVM::JVM"; RTC_CHECK(jni()) << "AttachCurrentThread() must be called on this thread."; LoadClasses(jni()); } JVM::~JVM() { - RTC_LOG(INFO) << "JVM::~JVM"; + RTC_LOG(LS_INFO) << "JVM::~JVM"; RTC_DCHECK(thread_checker_.IsCurrent()); FreeClassReferences(jni()); } std::unique_ptr JVM::environment() { - RTC_LOG(INFO) << "JVM::environment"; + RTC_LOG(LS_INFO) << "JVM::environment"; ; // The JNIEnv is used for thread-local storage. For this reason, we cannot // share a JNIEnv between threads. If a piece of code has no other way to get @@ -276,7 +276,7 @@ std::unique_ptr JVM::environment() { } JavaClass JVM::GetClass(const char* name) { - RTC_LOG(INFO) << "JVM::GetClass: " << name; + RTC_LOG(LS_INFO) << "JVM::GetClass: " << name; RTC_DCHECK(thread_checker_.IsCurrent()); return JavaClass(jni(), LookUpClass(name)); } diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc index 887aa58098..425692621d 100644 --- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc +++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc @@ -586,8 +586,8 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const { // theoretically use all available reference buffers. 
encoder_params.iNumRefFrame = encoder_params.iTemporalLayerNum - 1; } - RTC_LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "." - << OPENH264_MINOR; + RTC_LOG(LS_INFO) << "OpenH264 version is " << OPENH264_MAJOR << "." + << OPENH264_MINOR; switch (packetization_mode_) { case H264PacketizationMode::SingleNalUnit: // Limit the size of the packets produced. @@ -596,8 +596,8 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const { SM_SIZELIMITED_SLICE; encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint = static_cast(max_payload_size_); - RTC_LOG(INFO) << "Encoder is configured with NALU constraint: " - << max_payload_size_ << " bytes"; + RTC_LOG(LS_INFO) << "Encoder is configured with NALU constraint: " + << max_payload_size_ << " bytes"; break; case H264PacketizationMode::NonInterleaved: // When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc index 88b840cb43..0d6fdfae10 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc @@ -1293,8 +1293,8 @@ void LibvpxVp8Encoder::MaybeUpdatePixelFormat(vpx_img_fmt fmt) { << "Not all raw images had the right format!"; return; } - RTC_LOG(INFO) << "Updating vp8 encoder pixel format to " - << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420"); + RTC_LOG(LS_INFO) << "Updating vp8 encoder pixel format to " + << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420"); for (size_t i = 0; i < raw_images_.size(); ++i) { vpx_image_t& img = raw_images_[i]; auto d_w = img.d_w; diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc index ffbaaaa94b..c32673d3b4 100644 --- a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc +++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc @@ -1930,8 +1930,8 @@ void LibvpxVp9Encoder::MaybeRewrapRawWithFormat(const vpx_img_fmt fmt) { raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1, nullptr); } else if (raw_->fmt != fmt) { - RTC_LOG(INFO) << "Switching VP9 encoder pixel format to " - << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420"); + RTC_LOG(LS_INFO) << "Switching VP9 encoder pixel format to " + << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420"); libvpx_->img_free(raw_); raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1, nullptr); diff --git a/modules/video_coding/utility/ivf_file_reader.cc b/modules/video_coding/utility/ivf_file_reader.cc index f326c8cb53..63925702b5 100644 --- a/modules/video_coding/utility/ivf_file_reader.cc +++ b/modules/video_coding/utility/ivf_file_reader.cc @@ -104,10 +104,10 @@ bool IvfFileReader::Reset() { has_error_ = false; const char* codec_name = CodecTypeToPayloadString(codec_type_); - RTC_LOG(INFO) << "Opened IVF file with codec data of type " << codec_name - << " at resolution " << width_ << " x " << height_ << ", using " - << (using_capture_timestamps_ ? "1" : "90") - << "kHz clock resolution."; + RTC_LOG(LS_INFO) << "Opened IVF file with codec data of type " << codec_name + << " at resolution " << width_ << " x " << height_ + << ", using " << (using_capture_timestamps_ ? 
"1" : "90") + << "kHz clock resolution."; return true; } diff --git a/p2p/base/fake_ice_transport.h b/p2p/base/fake_ice_transport.h index 8b52fe934c..c053abd5f9 100644 --- a/p2p/base/fake_ice_transport.h +++ b/p2p/base/fake_ice_transport.h @@ -364,7 +364,7 @@ class FakeIceTransport : public IceTransportInternal { if (writable_ == writable) { return; } - RTC_LOG(INFO) << "Change writable_ to " << writable; + RTC_LOG(LS_INFO) << "Change writable_ to " << writable; writable_ = writable; if (writable_) { SignalReadyToSend(this); diff --git a/p2p/base/p2p_transport_channel.cc b/p2p/base/p2p_transport_channel.cc index 5587a84636..5c6b32c17e 100644 --- a/p2p/base/p2p_transport_channel.cc +++ b/p2p/base/p2p_transport_channel.cc @@ -1476,11 +1476,11 @@ bool P2PTransportChannel::CreateConnection(PortInterface* port, // It is not legal to try to change any of the parameters of an existing // connection; however, the other side can send a duplicate candidate. if (!remote_candidate.IsEquivalent(connection->remote_candidate())) { - RTC_LOG(INFO) << "Attempt to change a remote candidate." - " Existing remote candidate: " - << connection->remote_candidate().ToSensitiveString() - << "New remote candidate: " - << remote_candidate.ToSensitiveString(); + RTC_LOG(LS_INFO) << "Attempt to change a remote candidate." + " Existing remote candidate: " + << connection->remote_candidate().ToSensitiveString() + << "New remote candidate: " + << remote_candidate.ToSensitiveString(); } return false; } @@ -1532,8 +1532,8 @@ void P2PTransportChannel::RememberRemoteCandidate( size_t i = 0; while (i < remote_candidates_.size()) { if (remote_candidates_[i].generation() < remote_candidate.generation()) { - RTC_LOG(INFO) << "Pruning candidate from old generation: " - << remote_candidates_[i].address().ToSensitiveString(); + RTC_LOG(LS_INFO) << "Pruning candidate from old generation: " + << remote_candidates_[i].address().ToSensitiveString(); remote_candidates_.erase(remote_candidates_.begin() + i); } else { i += 1; @@ -1542,8 +1542,8 @@ void P2PTransportChannel::RememberRemoteCandidate( // Make sure this candidate is not a duplicate. 
if (IsDuplicateRemoteCandidate(remote_candidate)) { - RTC_LOG(INFO) << "Duplicate candidate: " - << remote_candidate.ToSensitiveString(); + RTC_LOG(LS_INFO) << "Duplicate candidate: " + << remote_candidate.ToSensitiveString(); return; } @@ -2147,8 +2147,8 @@ void P2PTransportChannel::OnPortDestroyed(PortInterface* port) { pruned_ports_.erase( std::remove(pruned_ports_.begin(), pruned_ports_.end(), port), pruned_ports_.end()); - RTC_LOG(INFO) << "Removed port because it is destroyed: " << ports_.size() - << " remaining"; + RTC_LOG(LS_INFO) << "Removed port because it is destroyed: " << ports_.size() + << " remaining"; } void P2PTransportChannel::OnPortsPruned( @@ -2157,8 +2157,8 @@ void P2PTransportChannel::OnPortsPruned( RTC_DCHECK_RUN_ON(network_thread_); for (PortInterface* port : ports) { if (PrunePort(port)) { - RTC_LOG(INFO) << "Removed port: " << port->ToString() << " " - << ports_.size() << " remaining"; + RTC_LOG(LS_INFO) << "Removed port: " << port->ToString() << " " + << ports_.size() << " remaining"; } } } diff --git a/p2p/base/port_unittest.cc b/p2p/base/port_unittest.cc index 06dafe8dbf..dc32463dab 100644 --- a/p2p/base/port_unittest.cc +++ b/p2p/base/port_unittest.cc @@ -350,7 +350,7 @@ class TestChannel : public sigslot::has_slots<> { void OnDestroyed(Connection* conn) { ASSERT_EQ(conn_, conn); - RTC_LOG(INFO) << "OnDestroy connection " << conn << " deleted"; + RTC_LOG(LS_INFO) << "OnDestroy connection " << conn << " deleted"; conn_ = NULL; // When the connection is destroyed, also clear these fields so future // connections are possible. diff --git a/p2p/client/basic_port_allocator.cc b/p2p/client/basic_port_allocator.cc index 5a26934b03..15f59bd522 100644 --- a/p2p/client/basic_port_allocator.cc +++ b/p2p/client/basic_port_allocator.cc @@ -103,9 +103,9 @@ void FilterNetworks(NetworkList* networks, NetworkFilter filter) { if (start_to_remove == networks->end()) { return; } - RTC_LOG(INFO) << "Filtered out " << filter.description << " networks:"; + RTC_LOG(LS_INFO) << "Filtered out " << filter.description << " networks:"; for (auto it = start_to_remove; it != networks->end(); ++it) { - RTC_LOG(INFO) << (*it)->ToString(); + RTC_LOG(LS_INFO) << (*it)->ToString(); } networks->erase(start_to_remove, networks->end()); } diff --git a/pc/jsep_transport.cc b/pc/jsep_transport.cc index e84426441f..e40c7b5f1b 100644 --- a/pc/jsep_transport.cc +++ b/pc/jsep_transport.cc @@ -365,7 +365,7 @@ webrtc::RTCError JsepTransport::VerifyCertificateFingerprint( void JsepTransport::SetActiveResetSrtpParams(bool active_reset_srtp_params) { RTC_DCHECK_RUN_ON(network_thread_); if (dtls_srtp_transport_) { - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "Setting active_reset_srtp_params of DtlsSrtpTransport to: " << active_reset_srtp_params; dtls_srtp_transport_->SetActiveResetSrtpParams(active_reset_srtp_params); diff --git a/pc/jsep_transport_controller.cc b/pc/jsep_transport_controller.cc index 475389c212..24154fd17d 100644 --- a/pc/jsep_transport_controller.cc +++ b/pc/jsep_transport_controller.cc @@ -366,7 +366,7 @@ void JsepTransportController::SetActiveResetSrtpParams( return; } RTC_DCHECK_RUN_ON(network_thread_); - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "Updating the active_reset_srtp_params for JsepTransportController: " << active_reset_srtp_params; active_reset_srtp_params_ = active_reset_srtp_params; diff --git a/rtc_base/logging.h b/rtc_base/logging.h index 4fbbb5c6fc..9bfef80bdc 100644 --- a/rtc_base/logging.h +++ b/rtc_base/logging.h @@ -89,6 +89,8 @@ enum LoggingSeverity { LS_WARNING, LS_ERROR, 
LS_NONE, + // Compatibility aliases, to be deleted. + // TODO(bugs.webrtc.org/13362): Remove usage and delete. INFO = LS_INFO, WARNING = LS_WARNING, LERROR = LS_ERROR diff --git a/rtc_base/network_unittest.cc b/rtc_base/network_unittest.cc index 4d94e7ce6d..87df0d3230 100644 --- a/rtc_base/network_unittest.cc +++ b/rtc_base/network_unittest.cc @@ -128,11 +128,11 @@ class FakeNetworkMonitorFactory : public NetworkMonitorFactory { bool SameNameAndPrefix(const rtc::Network& a, const rtc::Network& b) { if (a.name() != b.name()) { - RTC_LOG(INFO) << "Different interface names."; + RTC_LOG(LS_INFO) << "Different interface names."; return false; } if (a.prefix_length() != b.prefix_length() || a.prefix() != b.prefix()) { - RTC_LOG(INFO) << "Different IP prefixes."; + RTC_LOG(LS_INFO) << "Different IP prefixes."; return false; } return true; diff --git a/rtc_base/socket_adapters.cc b/rtc_base/socket_adapters.cc index c6cd196a7a..abe5e30d8f 100644 --- a/rtc_base/socket_adapters.cc +++ b/rtc_base/socket_adapters.cc @@ -105,7 +105,7 @@ void BufferedReadAdapter::OnReadEvent(Socket* socket) { buffer_size_ - data_len_, nullptr); if (len < 0) { // TODO: Do something better like forwarding the error to the user. - RTC_LOG_ERR(INFO) << "Recv"; + RTC_LOG_ERR(LS_INFO) << "Recv"; return; } diff --git a/rtc_base/win/scoped_com_initializer.cc b/rtc_base/win/scoped_com_initializer.cc index 81079fb54c..e791adc582 100644 --- a/rtc_base/win/scoped_com_initializer.cc +++ b/rtc_base/win/scoped_com_initializer.cc @@ -16,13 +16,13 @@ namespace webrtc { ScopedCOMInitializer::ScopedCOMInitializer() { - RTC_DLOG(INFO) << "Single-Threaded Apartment (STA) COM thread"; + RTC_DLOG(LS_INFO) << "Single-Threaded Apartment (STA) COM thread"; Initialize(COINIT_APARTMENTTHREADED); } // Constructor for MTA initialization. ScopedCOMInitializer::ScopedCOMInitializer(SelectMTA mta) { - RTC_DLOG(INFO) << "Multi-Threaded Apartment (MTA) COM thread"; + RTC_DLOG(LS_INFO) << "Multi-Threaded Apartment (MTA) COM thread"; Initialize(COINIT_MULTITHREADED); } @@ -46,7 +46,7 @@ void ScopedCOMInitializer::Initialize(COINIT init) { // successful call to CoInitializeEx, including any call that returns // S_FALSE, must be balanced by a corresponding call to CoUninitialize. 
if (hr_ == S_OK) { - RTC_DLOG(INFO) + RTC_DLOG(LS_INFO) << "The COM library was initialized successfully on this thread"; } else if (hr_ == S_FALSE) { RTC_DLOG(WARNING) diff --git a/rtc_base/win/windows_version_unittest.cc b/rtc_base/win/windows_version_unittest.cc index 9e582e549f..e1cd920157 100644 --- a/rtc_base/win/windows_version_unittest.cc +++ b/rtc_base/win/windows_version_unittest.cc @@ -18,11 +18,11 @@ namespace rtc_win { namespace { void MethodSupportedOnWin10AndLater() { - RTC_DLOG(INFO) << "MethodSupportedOnWin10AndLater"; + RTC_DLOG(LS_INFO) << "MethodSupportedOnWin10AndLater"; } void MethodNotSupportedOnWin10AndLater() { - RTC_DLOG(INFO) << "MethodNotSupportedOnWin10AndLater"; + RTC_DLOG(LS_INFO) << "MethodNotSupportedOnWin10AndLater"; } // Use global GetVersion() and use it in a way a user would typically use it @@ -39,7 +39,7 @@ TEST(WindowsVersion, GetVersionGlobalScopeAccessor) { TEST(WindowsVersion, ProcessorModelName) { std::string name = OSInfo::GetInstance()->processor_model_name(); EXPECT_FALSE(name.empty()); - RTC_DLOG(INFO) << "processor_model_name: " << name; + RTC_DLOG(LS_INFO) << "processor_model_name: " << name; } } // namespace diff --git a/rtc_tools/converter/yuv_to_ivf_converter.cc b/rtc_tools/converter/yuv_to_ivf_converter.cc index e4a1e125f8..d00457dec7 100644 --- a/rtc_tools/converter/yuv_to_ivf_converter.cc +++ b/rtc_tools/converter/yuv_to_ivf_converter.cc @@ -81,8 +81,8 @@ class IvfFileWriterEncodedCallback : public EncodedImageCallback { received_frames_count_++; RTC_CHECK_LE(received_frames_count_, expected_frames_count_); if (received_frames_count_ % kFrameLogInterval == 0) { - RTC_LOG(INFO) << received_frames_count_ << " out of " - << expected_frames_count_ << " frames written"; + RTC_LOG(LS_INFO) << received_frames_count_ << " out of " + << expected_frames_count_ << " frames written"; } next_frame_written_.Set(); return Result(Result::Error::OK); @@ -231,11 +231,11 @@ void WriteVideoFile(std::string input_file_name, encoder.WaitNextFrameWritten(kMaxFrameEncodeWaitTimeoutMs); if ((i + 1) % kFrameLogInterval == 0) { - RTC_LOG(INFO) << i + 1 << " out of " << frames_count - << " frames are sent for encoding"; + RTC_LOG(LS_INFO) << i + 1 << " out of " << frames_count + << " frames are sent for encoding"; } } - RTC_LOG(INFO) << "All " << frames_count << " frame are sent for encoding"; + RTC_LOG(LS_INFO) << "All " << frames_count << " frame are sent for encoding"; } } // namespace diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.cc b/sdk/android/native_api/audio_device_module/audio_device_android.cc index 8a57e4af91..9a4236165a 100644 --- a/sdk/android/native_api/audio_device_module/audio_device_android.cc +++ b/sdk/android/native_api/audio_device_module/audio_device_android.cc @@ -57,7 +57,7 @@ void GetDefaultAudioParameters(JNIEnv* env, rtc::scoped_refptr CreateAAudioAudioDeviceModule( JNIEnv* env, jobject application_context) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Get default audio input/output parameters. AudioParameters input_parameters; AudioParameters output_parameters; @@ -76,7 +76,7 @@ rtc::scoped_refptr CreateAAudioAudioDeviceModule( rtc::scoped_refptr CreateJavaAudioDeviceModule( JNIEnv* env, jobject application_context) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Get default audio input/output parameters. 
const JavaParamRef j_context(application_context); const ScopedJavaLocalRef j_audio_manager = @@ -104,7 +104,7 @@ rtc::scoped_refptr CreateJavaAudioDeviceModule( rtc::scoped_refptr CreateOpenSLESAudioDeviceModule( JNIEnv* env, jobject application_context) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Get default audio input/output parameters. AudioParameters input_parameters; AudioParameters output_parameters; @@ -127,7 +127,7 @@ rtc::scoped_refptr CreateOpenSLESAudioDeviceModule( rtc::scoped_refptr CreateJavaInputAndOpenSLESOutputAudioDeviceModule(JNIEnv* env, jobject application_context) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Get default audio input/output parameters. const JavaParamRef j_context(application_context); const ScopedJavaLocalRef j_audio_manager = diff --git a/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc b/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc index 4cd62bc6e0..fd988a567b 100644 --- a/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc +++ b/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc @@ -77,10 +77,10 @@ rtc::scoped_refptr CreateTestPCF( TEST(PeerConnectionFactoryTest, NativeToJavaPeerConnectionFactory) { JNIEnv* jni = AttachCurrentThreadIfNeeded(); - RTC_LOG(INFO) << "Initializing java peer connection factory."; + RTC_LOG(LS_INFO) << "Initializing java peer connection factory."; jni::Java_PeerConnectionFactoryInitializationHelper_initializeFactoryForTests( jni); - RTC_LOG(INFO) << "Java peer connection factory initialized."; + RTC_LOG(LS_INFO) << "Java peer connection factory initialized."; auto socket_server = std::make_unique(); @@ -105,7 +105,7 @@ TEST(PeerConnectionFactoryTest, NativeToJavaPeerConnectionFactory) { jni, factory, std::move(socket_server), std::move(network_thread), std::move(worker_thread), std::move(signaling_thread)); - RTC_LOG(INFO) << java_factory; + RTC_LOG(LS_INFO) << java_factory; EXPECT_NE(java_factory, nullptr); } diff --git a/sdk/android/src/jni/audio_device/aaudio_player.cc b/sdk/android/src/jni/audio_device/aaudio_player.cc index da68b839af..29bcfae214 100644 --- a/sdk/android/src/jni/audio_device/aaudio_player.cc +++ b/sdk/android/src/jni/audio_device/aaudio_player.cc @@ -28,19 +28,19 @@ enum AudioDeviceMessageType : uint32_t { AAudioPlayer::AAudioPlayer(const AudioParameters& audio_parameters) : main_thread_(rtc::Thread::Current()), aaudio_(audio_parameters, AAUDIO_DIRECTION_OUTPUT, this) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; thread_checker_aaudio_.Detach(); } AAudioPlayer::~AAudioPlayer() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK_RUN_ON(&main_thread_checker_); Terminate(); - RTC_LOG(INFO) << "#detected underruns: " << underrun_count_; + RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count_; } int AAudioPlayer::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK_RUN_ON(&main_thread_checker_); if (aaudio_.audio_parameters().channels() == 2) { RTC_DLOG(LS_WARNING) << "Stereo mode is enabled"; @@ -49,14 +49,14 @@ int AAudioPlayer::Init() { } int AAudioPlayer::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK_RUN_ON(&main_thread_checker_); StopPlayout(); return 0; } int AAudioPlayer::InitPlayout() { - RTC_LOG(INFO) << "InitPlayout"; + RTC_LOG(LS_INFO) << "InitPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); 
RTC_DCHECK(!initialized_); RTC_DCHECK(!playing_); @@ -73,7 +73,7 @@ bool AAudioPlayer::PlayoutIsInitialized() const { } int AAudioPlayer::StartPlayout() { - RTC_LOG(INFO) << "StartPlayout"; + RTC_LOG(LS_INFO) << "StartPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); RTC_DCHECK(!playing_); if (!initialized_) { @@ -94,7 +94,7 @@ int AAudioPlayer::StartPlayout() { } int AAudioPlayer::StopPlayout() { - RTC_LOG(INFO) << "StopPlayout"; + RTC_LOG(LS_INFO) << "StopPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); if (!initialized_ || !playing_) { return 0; @@ -115,7 +115,7 @@ bool AAudioPlayer::Playing() const { } void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_DLOG(INFO) << "AttachAudioBuffer"; + RTC_DLOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK_RUN_ON(&main_thread_checker_); audio_device_buffer_ = audioBuffer; const AudioParameters audio_parameters = aaudio_.audio_parameters(); @@ -173,9 +173,9 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data, // Log device id in first data callback to ensure that a valid device is // utilized. if (first_data_callback_) { - RTC_LOG(INFO) << "--- First output data callback: " - "device id=" - << aaudio_.device_id(); + RTC_LOG(LS_INFO) << "--- First output data callback: " + "device id=" + << aaudio_.device_id(); first_data_callback_ = false; } @@ -195,8 +195,8 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data, latency_millis_ = aaudio_.EstimateLatencyMillis(); // TODO(henrika): use for development only. if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) { - RTC_DLOG(INFO) << "output latency: " << latency_millis_ - << ", num_frames: " << num_frames; + RTC_DLOG(LS_INFO) << "output latency: " << latency_millis_ + << ", num_frames: " << num_frames; } // Read audio data from the WebRTC source using the FineAudioBuffer object @@ -231,7 +231,7 @@ void AAudioPlayer::OnMessage(rtc::Message* msg) { void AAudioPlayer::HandleStreamDisconnected() { RTC_DCHECK_RUN_ON(&main_thread_checker_); - RTC_DLOG(INFO) << "HandleStreamDisconnected"; + RTC_DLOG(LS_INFO) << "HandleStreamDisconnected"; if (!initialized_ || !playing_) { return; } diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.cc b/sdk/android/src/jni/audio_device/aaudio_recorder.cc index 8a4c35313a..8ab097d7a4 100644 --- a/sdk/android/src/jni/audio_device/aaudio_recorder.cc +++ b/sdk/android/src/jni/audio_device/aaudio_recorder.cc @@ -29,19 +29,19 @@ enum AudioDeviceMessageType : uint32_t { AAudioRecorder::AAudioRecorder(const AudioParameters& audio_parameters) : main_thread_(rtc::Thread::Current()), aaudio_(audio_parameters, AAUDIO_DIRECTION_INPUT, this) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; thread_checker_aaudio_.Detach(); } AAudioRecorder::~AAudioRecorder() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); - RTC_LOG(INFO) << "detected owerflows: " << overflow_count_; + RTC_LOG(LS_INFO) << "detected owerflows: " << overflow_count_; } int AAudioRecorder::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); if (aaudio_.audio_parameters().channels() == 2) { RTC_DLOG(LS_WARNING) << "Stereo mode is enabled"; @@ -50,14 +50,14 @@ int AAudioRecorder::Init() { } int AAudioRecorder::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopRecording(); return 0; } int AAudioRecorder::InitRecording() { 
- RTC_LOG(INFO) << "InitRecording"; + RTC_LOG(LS_INFO) << "InitRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK(!recording_); @@ -73,7 +73,7 @@ bool AAudioRecorder::RecordingIsInitialized() const { } int AAudioRecorder::StartRecording() { - RTC_LOG(INFO) << "StartRecording"; + RTC_LOG(LS_INFO) << "StartRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(initialized_); RTC_DCHECK(!recording_); @@ -90,7 +90,7 @@ int AAudioRecorder::StartRecording() { } int AAudioRecorder::StopRecording() { - RTC_LOG(INFO) << "StopRecording"; + RTC_LOG(LS_INFO) << "StopRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !recording_) { return 0; @@ -109,7 +109,7 @@ bool AAudioRecorder::Recording() const { } void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const AudioParameters audio_parameters = aaudio_.audio_parameters(); @@ -131,13 +131,13 @@ bool AAudioRecorder::IsNoiseSuppressorSupported() const { } int AAudioRecorder::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAEC: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInAEC: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } int AAudioRecorder::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInNS: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInNS: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } @@ -165,14 +165,14 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( int32_t num_frames) { // TODO(henrika): figure out why we sometimes hit this one. // RTC_DCHECK(thread_checker_aaudio_.IsCurrent()); - // RTC_LOG(INFO) << "OnDataCallback: " << num_frames; + // RTC_LOG(LS_INFO) << "OnDataCallback: " << num_frames; // Drain the input buffer at first callback to ensure that it does not // contain any old data. Will also ensure that the lowest possible latency // is obtained. if (first_data_callback_) { - RTC_LOG(INFO) << "--- First input data callback: " - "device id=" - << aaudio_.device_id(); + RTC_LOG(LS_INFO) << "--- First input data callback: " + "device id=" + << aaudio_.device_id(); aaudio_.ClearInputStream(audio_data, num_frames); first_data_callback_ = false; } @@ -188,8 +188,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( latency_millis_ = aaudio_.EstimateLatencyMillis(); // TODO(henrika): use for development only. if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) { - RTC_DLOG(INFO) << "input latency: " << latency_millis_ - << ", num_frames: " << num_frames; + RTC_DLOG(LS_INFO) << "input latency: " << latency_millis_ + << ", num_frames: " << num_frames; } // Copy recorded audio in `audio_data` to the WebRTC sink using the // FineAudioBuffer object. 
@@ -215,7 +215,7 @@ void AAudioRecorder::OnMessage(rtc::Message* msg) { void AAudioRecorder::HandleStreamDisconnected() { RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_LOG(INFO) << "HandleStreamDisconnected"; + RTC_LOG(LS_INFO) << "HandleStreamDisconnected"; if (!initialized_ || !recording_) { return; } diff --git a/sdk/android/src/jni/audio_device/aaudio_wrapper.cc b/sdk/android/src/jni/audio_device/aaudio_wrapper.cc index 8ce720fec3..8fc8e78c6c 100644 --- a/sdk/android/src/jni/audio_device/aaudio_wrapper.cc +++ b/sdk/android/src/jni/audio_device/aaudio_wrapper.cc @@ -137,20 +137,20 @@ AAudioWrapper::AAudioWrapper(const AudioParameters& audio_parameters, : audio_parameters_(audio_parameters), direction_(direction), observer_(observer) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(observer_); aaudio_thread_checker_.Detach(); - RTC_LOG(INFO) << audio_parameters_.ToString(); + RTC_LOG(LS_INFO) << audio_parameters_.ToString(); } AAudioWrapper::~AAudioWrapper() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!stream_); } bool AAudioWrapper::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); // Creates a stream builder which can be used to open an audio stream. ScopedStreamBuilder builder; @@ -174,7 +174,7 @@ bool AAudioWrapper::Init() { } bool AAudioWrapper::Start() { - RTC_LOG(INFO) << "Start"; + RTC_LOG(LS_INFO) << "Start"; RTC_DCHECK(thread_checker_.IsCurrent()); // TODO(henrika): this state check might not be needed. aaudio_stream_state_t current_state = AAudioStream_getState(stream_); @@ -190,7 +190,7 @@ bool AAudioWrapper::Start() { } bool AAudioWrapper::Stop() { - RTC_LOG(INFO) << "Stop: " << DirectionToString(direction()); + RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction()); RTC_DCHECK(thread_checker_.IsCurrent()); // Asynchronous request for the stream to stop. RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false); @@ -240,7 +240,7 @@ double AAudioWrapper::EstimateLatencyMillis() const { // Returns new buffer size or a negative error value if buffer size could not // be increased. 
bool AAudioWrapper::IncreaseOutputBufferSize() { - RTC_LOG(INFO) << "IncreaseBufferSize"; + RTC_LOG(LS_INFO) << "IncreaseBufferSize"; RTC_DCHECK(stream_); RTC_DCHECK(aaudio_thread_checker_.IsCurrent()); RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT); @@ -255,20 +255,20 @@ bool AAudioWrapper::IncreaseOutputBufferSize() { << ") is higher than max: " << max_buffer_size; return false; } - RTC_LOG(INFO) << "Updating buffer size to: " << buffer_size - << " (max=" << max_buffer_size << ")"; + RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size + << " (max=" << max_buffer_size << ")"; buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size); if (buffer_size < 0) { RTC_LOG(LS_ERROR) << "Failed to change buffer size: " << AAudio_convertResultToText(buffer_size); return false; } - RTC_LOG(INFO) << "Buffer size changed to: " << buffer_size; + RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size; return true; } void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) { - RTC_LOG(INFO) << "ClearInputStream"; + RTC_LOG(LS_INFO) << "ClearInputStream"; RTC_DCHECK(stream_); RTC_DCHECK(aaudio_thread_checker_.IsCurrent()); RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT); @@ -357,7 +357,7 @@ int64_t AAudioWrapper::frames_read() const { } void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) { - RTC_LOG(INFO) << "SetStreamConfiguration"; + RTC_LOG(LS_INFO) << "SetStreamConfiguration"; RTC_DCHECK(builder); RTC_DCHECK(thread_checker_.IsCurrent()); // Request usage of default primary output/input device. @@ -390,7 +390,7 @@ void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) { } bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) { - RTC_LOG(INFO) << "OpenStream"; + RTC_LOG(LS_INFO) << "OpenStream"; RTC_DCHECK(builder); AAudioStream* stream = nullptr; RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false); @@ -400,7 +400,7 @@ bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) { } void AAudioWrapper::CloseStream() { - RTC_LOG(INFO) << "CloseStream"; + RTC_LOG(LS_INFO) << "CloseStream"; RTC_DCHECK(stream_); LOG_ON_ERROR(AAudioStream_close(stream_)); stream_ = nullptr; @@ -419,16 +419,16 @@ void AAudioWrapper::LogStreamConfiguration() { ss << ", direction=" << DirectionToString(direction()); ss << ", device id=" << AAudioStream_getDeviceId(stream_); ss << ", frames per callback=" << frames_per_callback(); - RTC_LOG(INFO) << ss.str(); + RTC_LOG(LS_INFO) << ss.str(); } void AAudioWrapper::LogStreamState() { - RTC_LOG(INFO) << "AAudio stream state: " - << AAudio_convertStreamStateToText(stream_state()); + RTC_LOG(LS_INFO) << "AAudio stream state: " + << AAudio_convertStreamStateToText(stream_state()); } bool AAudioWrapper::VerifyStreamConfiguration() { - RTC_LOG(INFO) << "VerifyStreamConfiguration"; + RTC_LOG(LS_INFO) << "VerifyStreamConfiguration"; RTC_DCHECK(stream_); // TODO(henrika): should we verify device ID as well? if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) { @@ -466,16 +466,16 @@ bool AAudioWrapper::VerifyStreamConfiguration() { } bool AAudioWrapper::OptimizeBuffers() { - RTC_LOG(INFO) << "OptimizeBuffers"; + RTC_LOG(LS_INFO) << "OptimizeBuffers"; RTC_DCHECK(stream_); // Maximum number of frames that can be filled without blocking. 
- RTC_LOG(INFO) << "max buffer capacity in frames: " - << buffer_capacity_in_frames(); + RTC_LOG(LS_INFO) << "max buffer capacity in frames: " + << buffer_capacity_in_frames(); // Query the number of frames that the application should read or write at // one time for optimal performance. int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_); - RTC_LOG(INFO) << "frames per burst for optimal performance: " - << frames_per_burst; + RTC_LOG(LS_INFO) << "frames per burst for optimal performance: " + << frames_per_burst; frames_per_burst_ = frames_per_burst; if (direction() == AAUDIO_DIRECTION_INPUT) { // There is no point in calling setBufferSizeInFrames() for input streams @@ -492,7 +492,7 @@ bool AAudioWrapper::OptimizeBuffers() { return false; } // Maximum number of frames that can be filled without blocking. - RTC_LOG(INFO) << "buffer burst size in frames: " << buffer_size; + RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size; return true; } diff --git a/sdk/android/src/jni/audio_device/audio_device_module.cc b/sdk/android/src/jni/audio_device/audio_device_module.cc index 4c9c36b7ac..21c644fd51 100644 --- a/sdk/android/src/jni/audio_device/audio_device_module.cc +++ b/sdk/android/src/jni/audio_device/audio_device_module.cc @@ -70,26 +70,26 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { initialized_(false) { RTC_CHECK(input_); RTC_CHECK(output_); - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; thread_checker_.Detach(); } - ~AndroidAudioDeviceModule() override { RTC_DLOG(INFO) << __FUNCTION__; } + ~AndroidAudioDeviceModule() override { RTC_DLOG(LS_INFO) << __FUNCTION__; } int32_t ActiveAudioLayer( AudioDeviceModule::AudioLayer* audioLayer) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *audioLayer = audio_layer_; return 0; } int32_t RegisterAudioCallback(AudioTransport* audioCallback) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_device_buffer_->RegisterAudioCallback(audioCallback); } int32_t Init() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = std::make_unique(task_queue_factory_.get()); @@ -118,7 +118,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t Terminate() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return 0; RTC_DCHECK(thread_checker_.IsCurrent()); @@ -132,19 +132,19 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } bool Initialized() const override { - RTC_DLOG(INFO) << __FUNCTION__ << ":" << initialized_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ":" << initialized_; return initialized_; } int16_t PlayoutDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; - RTC_LOG(INFO) << "output: " << 1; + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << "output: " << 1; return 1; } int16_t RecordingDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; - RTC_DLOG(INFO) << "output: " << 1; + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << "output: " << 1; return 1; } @@ -163,7 +163,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { int32_t SetPlayoutDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is // done using Andoid APIs instead. 
- RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; return 0; } @@ -175,7 +175,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { int32_t SetRecordingDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is // done using Andoid APIs instead. - RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; return 0; } @@ -185,66 +185,66 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t PlayoutIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *available = true; - RTC_DLOG(INFO) << "output: " << *available; + RTC_DLOG(LS_INFO) << "output: " << *available; return 0; } int32_t InitPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; if (PlayoutIsInitialized()) { return 0; } int32_t result = output_->InitPlayout(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess", static_cast(result == 0)); return result; } bool PlayoutIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_->PlayoutIsInitialized(); } int32_t RecordingIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *available = true; - RTC_DLOG(INFO) << "output: " << *available; + RTC_DLOG(LS_INFO) << "output: " << *available; return 0; } int32_t InitRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; if (RecordingIsInitialized()) { return 0; } int32_t result = input_->InitRecording(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess", static_cast(result == 0)); return result; } bool RecordingIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return input_->RecordingIsInitialized(); } int32_t StartPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; if (Playing()) { return 0; } int32_t result = output_->StartPlayout(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess", static_cast(result == 0)); if (result == 0) { @@ -256,34 +256,34 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t StopPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; if (!Playing()) return 0; - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; audio_device_buffer_->StopPlayout(); int32_t result = output_->StopPlayout(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess", static_cast(result == 0)); return result; } bool Playing() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_->Playing(); } int32_t StartRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; if (Recording()) { return 0; } int32_t result = input_->StartRecording(); - 
RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess", static_cast(result == 0)); if (result == 0) { @@ -295,74 +295,74 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t StopRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; if (!Recording()) return 0; audio_device_buffer_->StopRecording(); int32_t result = input_->StopRecording(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess", static_cast(result == 0)); return result; } bool Recording() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return input_->Recording(); } int32_t InitSpeaker() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return initialized_ ? 0 : -1; } bool SpeakerIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return initialized_; } int32_t InitMicrophone() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return initialized_ ? 0 : -1; } bool MicrophoneIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return initialized_; } int32_t SpeakerVolumeIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; *available = output_->SpeakerVolumeIsAvailable(); - RTC_DLOG(INFO) << "output: " << *available; + RTC_DLOG(LS_INFO) << "output: " << *available; return 0; } int32_t SetSpeakerVolume(uint32_t volume) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; return output_->SetSpeakerVolume(volume); } int32_t SpeakerVolume(uint32_t* output_volume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; absl::optional volume = output_->SpeakerVolume(); if (!volume) return -1; *output_volume = *volume; - RTC_DLOG(INFO) << "output: " << *volume; + RTC_DLOG(LS_INFO) << "output: " << *volume; return 0; } int32_t MaxSpeakerVolume(uint32_t* output_max_volume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; absl::optional max_volume = output_->MaxSpeakerVolume(); @@ -373,7 +373,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t MinSpeakerVolume(uint32_t* output_min_volume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return -1; absl::optional min_volume = output_->MinSpeakerVolume(); @@ -384,71 +384,71 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t MicrophoneVolumeIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *available = false; - RTC_DLOG(INFO) << "output: " << *available; + RTC_DLOG(LS_INFO) << "output: " << *available; return -1; } int32_t SetMicrophoneVolume(uint32_t volume) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; RTC_CHECK_NOTREACHED(); } int32_t MicrophoneVolume(uint32_t* volume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK_NOTREACHED(); } int32_t 
MaxMicrophoneVolume(uint32_t* maxVolume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK_NOTREACHED(); } int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK_NOTREACHED(); } int32_t SpeakerMuteIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK_NOTREACHED(); } int32_t SetSpeakerMute(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK_NOTREACHED(); } int32_t SpeakerMute(bool* enabled) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK_NOTREACHED(); } int32_t MicrophoneMuteIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK_NOTREACHED(); } int32_t SetMicrophoneMute(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK_NOTREACHED(); } int32_t MicrophoneMute(bool* enabled) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK_NOTREACHED(); } int32_t StereoPlayoutIsAvailable(bool* available) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *available = is_stereo_playout_supported_; - RTC_DLOG(INFO) << "output: " << *available; + RTC_DLOG(LS_INFO) << "output: " << *available; return 0; } int32_t SetStereoPlayout(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; // Android does not support changes between mono and stero on the fly. The // use of stereo or mono is determined by the audio layer. It is allowed // to call this method if that same state is not modified. @@ -461,21 +461,21 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t StereoPlayout(bool* enabled) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *enabled = is_stereo_playout_supported_; - RTC_DLOG(INFO) << "output: " << *enabled; + RTC_DLOG(LS_INFO) << "output: " << *enabled; return 0; } int32_t StereoRecordingIsAvailable(bool* available) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *available = is_stereo_record_supported_; - RTC_DLOG(INFO) << "output: " << *available; + RTC_DLOG(LS_INFO) << "output: " << *available; return 0; } int32_t SetStereoRecording(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; // Android does not support changes between mono and stero on the fly. The // use of stereo or mono is determined by the audio layer. It is allowed // to call this method if that same state is not modified. @@ -488,9 +488,9 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t StereoRecording(bool* enabled) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; *enabled = is_stereo_record_supported_; - RTC_DLOG(INFO) << "output: " << *enabled; + RTC_DLOG(LS_INFO) << "output: " << *enabled; return 0; } @@ -514,18 +514,18 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { // a "Not Implemented" log will be filed. 
This non-perfect state will remain // until I have added full support for audio effects based on OpenSL ES APIs. bool BuiltInAECIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return false; bool isAvailable = input_->IsAcousticEchoCancelerSupported(); - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } // Not implemented for any input device on Android. bool BuiltInAGCIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; - RTC_DLOG(INFO) << "output: " << false; + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << "output: " << false; return false; } @@ -534,38 +534,38 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { // TODO(henrika): add implementation for OpenSL ES based audio as well. // In addition, see comments for BuiltInAECIsAvailable(). bool BuiltInNSIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return false; bool isAvailable = input_->IsNoiseSuppressorSupported(); - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } // TODO(henrika): add implementation for OpenSL ES based audio as well. int32_t EnableBuiltInAEC(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; if (!initialized_) return -1; RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available"; int32_t result = input_->EnableBuiltInAEC(enable); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; return result; } int32_t EnableBuiltInAGC(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK_NOTREACHED(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. 
int32_t EnableBuiltInNS(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; if (!initialized_) return -1; RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available"; int32_t result = input_->EnableBuiltInNS(enable); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; return result; } @@ -576,7 +576,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t AttachAudioBuffer() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; output_->AttachAudioBuffer(audio_device_buffer_.get()); input_->AttachAudioBuffer(audio_device_buffer_.get()); return 0; @@ -640,7 +640,7 @@ rtc::scoped_refptr CreateAudioDeviceModuleFromInputAndOutput( uint16_t playout_delay_ms, std::unique_ptr audio_input, std::unique_ptr audio_output) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return rtc::make_ref_counted( audio_layer, is_stereo_playout_supported, is_stereo_record_supported, playout_delay_ms, std::move(audio_input), std::move(audio_output)); diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.cc b/sdk/android/src/jni/audio_device/audio_record_jni.cc index 2739522be7..170c81af48 100644 --- a/sdk/android/src/jni/audio_device/audio_record_jni.cc +++ b/sdk/android/src/jni/audio_device/audio_record_jni.cc @@ -38,7 +38,7 @@ class ScopedHistogramTimer { ~ScopedHistogramTimer() { const int64_t life_time_ms = rtc::TimeSince(start_time_ms_); RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms); - RTC_LOG(INFO) << histogram_name_ << ": " << life_time_ms; + RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms; } private: @@ -68,7 +68,7 @@ AudioRecordJni::AudioRecordJni(JNIEnv* env, initialized_(false), recording_(false), audio_device_buffer_(nullptr) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(audio_parameters_.is_valid()); Java_WebRtcAudioRecord_setNativeAudioRecord(env, j_audio_record_, jni::jlongFromPointer(this)); @@ -79,20 +79,20 @@ AudioRecordJni::AudioRecordJni(JNIEnv* env, } AudioRecordJni::~AudioRecordJni() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); } int32_t AudioRecordJni::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; env_ = AttachCurrentThreadIfNeeded(); RTC_DCHECK(thread_checker_.IsCurrent()); return 0; } int32_t AudioRecordJni::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopRecording(); thread_checker_.Detach(); @@ -100,7 +100,7 @@ int32_t AudioRecordJni::Terminate() { } int32_t AudioRecordJni::InitRecording() { - RTC_LOG(INFO) << "InitRecording"; + RTC_LOG(LS_INFO) << "InitRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (initialized_) { // Already initialized. 
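The built-in audio effect hunks above keep the availability check in front of the enable call; a short usage sketch of that pattern against the AudioDeviceModule interface follows (the wrapper function name and the assumption that `adm` points to an initialized module are illustrative; the interface methods themselves appear in the hunks above):

#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/logging.h"

void MaybeEnableHardwareAec(webrtc::AudioDeviceModule* adm) {
  // The patched Android module RTC_CHECKs availability inside
  // EnableBuiltInAEC(), so gate the call the same way it does.
  if (adm->BuiltInAECIsAvailable()) {
    const int32_t result = adm->EnableBuiltInAEC(true);
    RTC_LOG(LS_INFO) << "EnableBuiltInAEC returned: " << result;
  } else {
    RTC_LOG(LS_INFO) << "HW AEC is not available on this device";
  }
}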
@@ -118,7 +118,7 @@ int32_t AudioRecordJni::InitRecording() { return -1; } frames_per_buffer_ = static_cast(frames_per_buffer); - RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_; + RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_; const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t); RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_, frames_per_buffer_ * bytes_per_frame); @@ -132,7 +132,7 @@ bool AudioRecordJni::RecordingIsInitialized() const { } int32_t AudioRecordJni::StartRecording() { - RTC_LOG(INFO) << "StartRecording"; + RTC_LOG(LS_INFO) << "StartRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (recording_) { // Already recording. @@ -153,7 +153,7 @@ int32_t AudioRecordJni::StartRecording() { } int32_t AudioRecordJni::StopRecording() { - RTC_LOG(INFO) << "StopRecording"; + RTC_LOG(LS_INFO) << "StopRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !recording_) { return 0; @@ -166,8 +166,9 @@ int32_t AudioRecordJni::StopRecording() { env_, j_audio_record_); RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.SourceMatchesRecordingSession", session_was_ok); - RTC_LOG(INFO) << "HISTOGRAM(WebRTC.Audio.SourceMatchesRecordingSession): " - << session_was_ok; + RTC_LOG(LS_INFO) + << "HISTOGRAM(WebRTC.Audio.SourceMatchesRecordingSession): " + << session_was_ok; } if (!Java_WebRtcAudioRecord_stopRecording(env_, j_audio_record_)) { RTC_LOG(LS_ERROR) << "StopRecording failed"; @@ -188,14 +189,14 @@ bool AudioRecordJni::Recording() const { } void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const int sample_rate_hz = audio_parameters_.sample_rate(); - RTC_LOG(INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")"; + RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")"; audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz); const size_t channels = audio_parameters_.channels(); - RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")"; audio_device_buffer_->SetRecordingChannels(channels); } @@ -212,7 +213,7 @@ bool AudioRecordJni::IsNoiseSuppressorSupported() const { } int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAEC(" << enable << ")"; + RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return Java_WebRtcAudioRecord_enableBuiltInAEC(env_, j_audio_record_, enable) ? 0 @@ -220,7 +221,7 @@ int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) { } int32_t AudioRecordJni::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInNS(" << enable << ")"; + RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return Java_WebRtcAudioRecord_enableBuiltInNS(env_, j_audio_record_, enable) ? 
0 @@ -231,12 +232,12 @@ void AudioRecordJni::CacheDirectBufferAddress( JNIEnv* env, const JavaParamRef& j_caller, const JavaParamRef& byte_buffer) { - RTC_LOG(INFO) << "OnCacheDirectBufferAddress"; + RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!direct_buffer_address_); direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer.obj()); jlong capacity = env->GetDirectBufferCapacity(byte_buffer.obj()); - RTC_LOG(INFO) << "direct buffer capacity: " << capacity; + RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity; direct_buffer_capacity_in_bytes_ = static_cast(capacity); } @@ -257,7 +258,7 @@ void AudioRecordJni::DataIsRecorded(JNIEnv* env, // of `playDelayMs` and `recDelayMs`, hence the distributions does not matter. audio_device_buffer_->SetVQEData(total_delay_ms_, 0); if (audio_device_buffer_->DeliverRecordedData() == -1) { - RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed"; + RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed"; } } diff --git a/sdk/android/src/jni/audio_device/audio_track_jni.cc b/sdk/android/src/jni/audio_device/audio_track_jni.cc index 85adee2861..f2f22f915b 100644 --- a/sdk/android/src/jni/audio_device/audio_track_jni.cc +++ b/sdk/android/src/jni/audio_device/audio_track_jni.cc @@ -44,7 +44,7 @@ AudioTrackJni::AudioTrackJni(JNIEnv* env, initialized_(false), playing_(false), audio_device_buffer_(nullptr) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(audio_parameters_.is_valid()); Java_WebRtcAudioTrack_setNativeAudioTrack(env, j_audio_track_, jni::jlongFromPointer(this)); @@ -55,20 +55,20 @@ AudioTrackJni::AudioTrackJni(JNIEnv* env, } AudioTrackJni::~AudioTrackJni() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); } int32_t AudioTrackJni::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; env_ = AttachCurrentThreadIfNeeded(); RTC_DCHECK(thread_checker_.IsCurrent()); return 0; } int32_t AudioTrackJni::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopPlayout(); thread_checker_.Detach(); @@ -76,7 +76,7 @@ int32_t AudioTrackJni::Terminate() { } int32_t AudioTrackJni::InitPlayout() { - RTC_LOG(INFO) << "InitPlayout"; + RTC_LOG(LS_INFO) << "InitPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); if (initialized_) { // Already initialized. @@ -126,7 +126,7 @@ bool AudioTrackJni::PlayoutIsInitialized() const { } int32_t AudioTrackJni::StartPlayout() { - RTC_LOG(INFO) << "StartPlayout"; + RTC_LOG(LS_INFO) << "StartPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); if (playing_) { // Already playing. 
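Several of the hunks above pair a UMA histogram with an LS_INFO line so the same value is visible both in metrics and in local logs; a minimal sketch of that pairing follows (the wrapper function is illustrative; the macro and metric name are taken from the hunks above and from system_wrappers/include/metrics.h):

#include "rtc_base/logging.h"
#include "system_wrappers/include/metrics.h"

void ReportStopRecordingResult(int32_t result) {
  // Record success/failure as a boolean sample and echo it to the log, as the
  // StopRecording() hunks do.
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
                        static_cast<int>(result == 0));
  RTC_LOG(LS_INFO) << "HISTOGRAM(WebRTC.Audio.StopRecordingSuccess): "
                   << (result == 0);
}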
@@ -146,7 +146,7 @@ int32_t AudioTrackJni::StartPlayout() { } int32_t AudioTrackJni::StopPlayout() { - RTC_LOG(INFO) << "StopPlayout"; + RTC_LOG(LS_INFO) << "StopPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !playing_) { return 0; @@ -185,7 +185,7 @@ bool AudioTrackJni::SpeakerVolumeIsAvailable() { } int AudioTrackJni::SetSpeakerVolume(uint32_t volume) { - RTC_LOG(INFO) << "SetSpeakerVolume(" << volume << ")"; + RTC_LOG(LS_INFO) << "SetSpeakerVolume(" << volume << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return Java_WebRtcAudioTrack_setStreamVolume(env_, j_audio_track_, static_cast(volume)) @@ -207,7 +207,7 @@ absl::optional AudioTrackJni::SpeakerVolume() const { RTC_DCHECK(thread_checker_.IsCurrent()); const uint32_t volume = Java_WebRtcAudioTrack_getStreamVolume(env_, j_audio_track_); - RTC_LOG(INFO) << "SpeakerVolume: " << volume; + RTC_LOG(LS_INFO) << "SpeakerVolume: " << volume; return volume; } @@ -217,30 +217,30 @@ int AudioTrackJni::GetPlayoutUnderrunCount() { // TODO(henrika): possibly add stereo support. void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const int sample_rate_hz = audio_parameters_.sample_rate(); - RTC_LOG(INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")"; audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz); const size_t channels = audio_parameters_.channels(); - RTC_LOG(INFO) << "SetPlayoutChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")"; audio_device_buffer_->SetPlayoutChannels(channels); } void AudioTrackJni::CacheDirectBufferAddress( JNIEnv* env, const JavaParamRef& byte_buffer) { - RTC_LOG(INFO) << "OnCacheDirectBufferAddress"; + RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!direct_buffer_address_); direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer.obj()); jlong capacity = env->GetDirectBufferCapacity(byte_buffer.obj()); - RTC_LOG(INFO) << "direct buffer capacity: " << capacity; + RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity; direct_buffer_capacity_in_bytes_ = static_cast(capacity); const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t); frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame; - RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_; + RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_; } // This method is called on a high-priority thread from Java. The name of diff --git a/sdk/android/src/jni/audio_device/opensles_common.cc b/sdk/android/src/jni/audio_device/opensles_common.cc index 0f35b2712a..abc415d894 100644 --- a/sdk/android/src/jni/audio_device/opensles_common.cc +++ b/sdk/android/src/jni/audio_device/opensles_common.cc @@ -107,7 +107,7 @@ OpenSLEngineManager::OpenSLEngineManager() { } SLObjectItf OpenSLEngineManager::GetOpenSLEngine() { - RTC_LOG(INFO) << "GetOpenSLEngine"; + RTC_LOG(LS_INFO) << "GetOpenSLEngine"; RTC_DCHECK(thread_checker_.IsCurrent()); // OpenSL ES for Android only supports a single engine per application. 
// If one already has been created, return existing object instead of diff --git a/sdk/objc/native/api/audio_device_module.mm b/sdk/objc/native/api/audio_device_module.mm index dd95775204..3c2790e38d 100644 --- a/sdk/objc/native/api/audio_device_module.mm +++ b/sdk/objc/native/api/audio_device_module.mm @@ -18,12 +18,11 @@ namespace webrtc { rtc::scoped_refptr CreateAudioDeviceModule(bool bypass_voice_processing) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; #if defined(WEBRTC_IOS) return new rtc::RefCountedObject(bypass_voice_processing); #else - RTC_LOG(LERROR) - << "current platform is not supported => this module will self destruct!"; + RTC_LOG(LS_ERROR) << "current platform is not supported => this module will self destruct!"; return nullptr; #endif } diff --git a/sdk/objc/native/src/audio/audio_device_module_ios.mm b/sdk/objc/native/src/audio/audio_device_module_ios.mm index 859442dc9e..db4ed21f9a 100644 --- a/sdk/objc/native/src/audio/audio_device_module_ios.mm +++ b/sdk/objc/native/src/audio/audio_device_module_ios.mm @@ -43,22 +43,22 @@ namespace ios_adm { AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) : bypass_voice_processing_(bypass_voice_processing), task_queue_factory_(CreateDefaultTaskQueueFactory()) { - RTC_LOG(INFO) << "current platform is IOS"; - RTC_LOG(INFO) << "iPhone Audio APIs will be utilized."; + RTC_LOG(LS_INFO) << "current platform is IOS"; + RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized."; } int32_t AudioDeviceModuleIOS::AttachAudioBuffer() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; audio_device_->AttachAudioBuffer(audio_device_buffer_.get()); return 0; } AudioDeviceModuleIOS::~AudioDeviceModuleIOS() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; } int32_t AudioDeviceModuleIOS::ActiveAudioLayer(AudioLayer* audioLayer) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; AudioLayer activeAudio; if (audio_device_->ActiveAudioLayer(activeAudio) == -1) { return -1; @@ -68,7 +68,7 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } int32_t AudioDeviceModuleIOS::Init() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (initialized_) return 0; @@ -91,7 +91,7 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } int32_t AudioDeviceModuleIOS::Terminate() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) return 0; if (audio_device_->Terminate() == -1) { @@ -102,65 +102,65 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } bool AudioDeviceModuleIOS::Initialized() const { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << initialized_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_; return initialized_; } int32_t AudioDeviceModuleIOS::InitSpeaker() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->InitSpeaker(); } int32_t AudioDeviceModuleIOS::InitMicrophone() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->InitMicrophone(); } int32_t AudioDeviceModuleIOS::SpeakerVolumeIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << 
isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetSpeakerVolume(uint32_t volume) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerVolume(volume); } int32_t AudioDeviceModuleIOS::SpeakerVolume(uint32_t* volume) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->SpeakerVolume(level) == -1) { return -1; } *volume = level; - RTC_DLOG(INFO) << "output: " << *volume; + RTC_DLOG(LS_INFO) << "output: " << *volume; return 0; } bool AudioDeviceModuleIOS::SpeakerIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = audio_device_->SpeakerIsInitialized(); - RTC_DLOG(INFO) << "output: " << isInitialized; + RTC_DLOG(LS_INFO) << "output: " << isInitialized; return isInitialized; } bool AudioDeviceModuleIOS::MicrophoneIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = audio_device_->MicrophoneIsInitialized(); - RTC_DLOG(INFO) << "output: " << isInitialized; + RTC_DLOG(LS_INFO) << "output: " << isInitialized; return isInitialized; } @@ -185,110 +185,110 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } int32_t AudioDeviceModuleIOS::SpeakerMuteIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetSpeakerMute(bool enable) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerMute(enable); } int32_t AudioDeviceModuleIOS::SpeakerMute(bool* enabled) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->SpeakerMute(muted) == -1) { return -1; } *enabled = muted; - RTC_DLOG(INFO) << "output: " << muted; + RTC_DLOG(LS_INFO) << "output: " << muted; return 0; } int32_t AudioDeviceModuleIOS::MicrophoneMuteIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetMicrophoneMute(bool enable) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneMute(enable)); } int32_t AudioDeviceModuleIOS::MicrophoneMute(bool* enabled) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->MicrophoneMute(muted) == -1) { return -1; } *enabled = muted; - RTC_DLOG(INFO) << "output: " << muted; + RTC_DLOG(LS_INFO) << "output: " << muted; return 0; } int32_t 
AudioDeviceModuleIOS::MicrophoneVolumeIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetMicrophoneVolume(uint32_t volume) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneVolume(volume)); } int32_t AudioDeviceModuleIOS::MicrophoneVolume(uint32_t* volume) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->MicrophoneVolume(level) == -1) { return -1; } *volume = level; - RTC_DLOG(INFO) << "output: " << *volume; + RTC_DLOG(LS_INFO) << "output: " << *volume; return 0; } int32_t AudioDeviceModuleIOS::StereoRecordingIsAvailable( bool* available) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetStereoRecording(bool enable) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (enable) { RTC_LOG(WARNING) << "recording in stereo is not supported"; @@ -297,31 +297,31 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } int32_t AudioDeviceModuleIOS::StereoRecording(bool* enabled) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoRecording(stereo) == -1) { return -1; } *enabled = stereo; - RTC_DLOG(INFO) << "output: " << stereo; + RTC_DLOG(LS_INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleIOS::StereoPlayoutIsAvailable(bool* available) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetStereoPlayout(bool enable) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (audio_device_->PlayoutIsInitialized()) { RTC_LOG(LERROR) @@ -341,38 +341,38 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } int32_t AudioDeviceModuleIOS::StereoPlayout(bool* enabled) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoPlayout(stereo) == -1) { return -1; } *enabled = stereo; - RTC_DLOG(INFO) << "output: " << stereo; + RTC_DLOG(LS_INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleIOS::PlayoutIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if 
(audio_device_->PlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::RecordingIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->RecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return 0; } @@ -397,21 +397,21 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } int16_t AudioDeviceModuleIOS::PlayoutDevices() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nPlayoutDevices = audio_device_->PlayoutDevices(); - RTC_DLOG(INFO) << "output: " << nPlayoutDevices; + RTC_DLOG(LS_INFO) << "output: " << nPlayoutDevices; return (int16_t)(nPlayoutDevices); } int32_t AudioDeviceModuleIOS::SetPlayoutDevice(uint16_t index) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(index); } int32_t AudioDeviceModuleIOS::SetPlayoutDevice(WindowsDeviceType device) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(device); } @@ -420,7 +420,7 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -429,10 +429,10 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) return -1; } if (name != NULL) { - RTC_DLOG(INFO) << "output: name = " << name; + RTC_DLOG(LS_INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_DLOG(INFO) << "output: guid = " << guid; + RTC_DLOG(LS_INFO) << "output: guid = " << guid; } return 0; } @@ -441,7 +441,7 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -450,137 +450,137 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) return -1; } if (name != NULL) { - RTC_DLOG(INFO) << "output: name = " << name; + RTC_DLOG(LS_INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_DLOG(INFO) << "output: guid = " << guid; + RTC_DLOG(LS_INFO) << "output: guid = " << guid; } return 0; } int16_t AudioDeviceModuleIOS::RecordingDevices() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nRecordingDevices = audio_device_->RecordingDevices(); - RTC_DLOG(INFO) << "output: " << nRecordingDevices; + RTC_DLOG(LS_INFO) << "output: " << nRecordingDevices; return (int16_t)nRecordingDevices; } int32_t AudioDeviceModuleIOS::SetRecordingDevice(uint16_t index) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetRecordingDevice(index); } int32_t 
AudioDeviceModuleIOS::SetRecordingDevice(WindowsDeviceType device) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetRecordingDevice(device); } int32_t AudioDeviceModuleIOS::InitPlayout() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (PlayoutIsInitialized()) { return 0; } int32_t result = audio_device_->InitPlayout(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleIOS::InitRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (RecordingIsInitialized()) { return 0; } int32_t result = audio_device_->InitRecording(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleIOS::PlayoutIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->PlayoutIsInitialized(); } bool AudioDeviceModuleIOS::RecordingIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->RecordingIsInitialized(); } int32_t AudioDeviceModuleIOS::StartPlayout() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (Playing()) { return 0; } audio_device_buffer_.get()->StartPlayout(); int32_t result = audio_device_->StartPlayout(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleIOS::StopPlayout() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopPlayout(); audio_device_buffer_.get()->StopPlayout(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleIOS::Playing() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->Playing(); } int32_t AudioDeviceModuleIOS::StartRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (Recording()) { return 0; } audio_device_buffer_.get()->StartRecording(); int32_t result = audio_device_->StartRecording(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleIOS::StopRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopRecording(); audio_device_buffer_.get()->StopRecording(); - RTC_DLOG(INFO) << "output: " << result; + RTC_DLOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleIOS::Recording() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return 
audio_device_->Recording(); } int32_t AudioDeviceModuleIOS::RegisterAudioCallback( AudioTransport* audioCallback) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_device_buffer_.get()->RegisterAudioCallback(audioCallback); } @@ -596,50 +596,50 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) } bool AudioDeviceModuleIOS::BuiltInAECIsAvailable() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAECIsAvailable(); - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleIOS::EnableBuiltInAEC(bool enable) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAEC(enable); - RTC_DLOG(INFO) << "output: " << ok; + RTC_DLOG(LS_INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleIOS::BuiltInAGCIsAvailable() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAGCIsAvailable(); - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleIOS::EnableBuiltInAGC(bool enable) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAGC(enable); - RTC_DLOG(INFO) << "output: " << ok; + RTC_DLOG(LS_INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleIOS::BuiltInNSIsAvailable() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInNSIsAvailable(); - RTC_DLOG(INFO) << "output: " << isAvailable; + RTC_DLOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleIOS::EnableBuiltInNS(bool enable) { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInNS(enable); - RTC_DLOG(INFO) << "output: " << ok; + RTC_DLOG(LS_INFO) << "output: " << ok; return ok; } @@ -653,17 +653,17 @@ AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing) #if defined(WEBRTC_IOS) int AudioDeviceModuleIOS::GetPlayoutAudioParameters( AudioParameters* params) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; int r = audio_device_->GetPlayoutAudioParameters(params); - RTC_DLOG(INFO) << "output: " << r; + RTC_DLOG(LS_INFO) << "output: " << r; return r; } int AudioDeviceModuleIOS::GetRecordAudioParameters( AudioParameters* params) const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; int r = audio_device_->GetRecordAudioParameters(params); - RTC_DLOG(INFO) << "output: " << r; + RTC_DLOG(LS_INFO) << "output: " << r; return r; } #endif // WEBRTC_IOS diff --git a/test/network/cross_traffic_unittest.cc b/test/network/cross_traffic_unittest.cc index 2744a90ce3..0c1bb46302 100644 --- a/test/network/cross_traffic_unittest.cc +++ b/test/network/cross_traffic_unittest.cc @@ -87,8 +87,8 @@ TEST(CrossTrafficTest, PulsedPeaksCrossTraffic) { fixture.clock.AdvanceTimeMilliseconds(1); } - RTC_LOG(INFO) << fixture.counter.packets_count_ << " packets; " - << 
fixture.counter.total_packets_size_ << " bytes"; + RTC_LOG(LS_INFO) << fixture.counter.packets_count_ << " packets; " + << fixture.counter.total_packets_size_ << " bytes"; // Using 50% duty cycle. const auto kExpectedDataSent = kRunTime * config.peak_rate * 0.5; EXPECT_NEAR(fixture.counter.total_packets_size_, kExpectedDataSent.bytes(), @@ -115,8 +115,8 @@ TEST(CrossTrafficTest, RandomWalkCrossTraffic) { fixture.clock.AdvanceTimeMilliseconds(1); } - RTC_LOG(INFO) << fixture.counter.packets_count_ << " packets; " - << fixture.counter.total_packets_size_ << " bytes"; + RTC_LOG(LS_INFO) << fixture.counter.packets_count_ << " packets; " + << fixture.counter.total_packets_size_ << " bytes"; // Sending at peak rate since bias = 1. const auto kExpectedDataSent = kRunTime * config.peak_rate; EXPECT_NEAR(fixture.counter.total_packets_size_, kExpectedDataSent.bytes(), diff --git a/test/network/fake_network_socket_server.cc b/test/network/fake_network_socket_server.cc index 9597edbbbd..28c0473e44 100644 --- a/test/network/fake_network_socket_server.cc +++ b/test/network/fake_network_socket_server.cc @@ -132,7 +132,7 @@ int FakeNetworkSocket::Bind(const rtc::SocketAddress& addr) { endpoint_ = socket_server_->GetEndpointNode(local_addr_.ipaddr()); if (!endpoint_) { local_addr_.Clear(); - RTC_LOG(INFO) << "No endpoint for address: " << ToString(addr); + RTC_LOG(LS_INFO) << "No endpoint for address: " << ToString(addr); error_ = EADDRNOTAVAIL; return 2; } @@ -140,7 +140,7 @@ int FakeNetworkSocket::Bind(const rtc::SocketAddress& addr) { endpoint_->BindReceiver(local_addr_.port(), this); if (!port) { local_addr_.Clear(); - RTC_LOG(INFO) << "Cannot bind to in-use address: " << ToString(addr); + RTC_LOG(LS_INFO) << "Cannot bind to in-use address: " << ToString(addr); error_ = EADDRINUSE; return 1; } diff --git a/test/network/network_emulation.cc b/test/network/network_emulation.cc index ada9ab542a..4cd2fda1d0 100644 --- a/test/network/network_emulation.cc +++ b/test/network/network_emulation.cc @@ -477,8 +477,8 @@ EmulatedEndpointImpl::EmulatedEndpointImpl(const Options& options, network_->AddIP(options_.ip); enabled_state_checker_.Detach(); - RTC_LOG(INFO) << "Created emulated endpoint " << options_.log_name - << "; id=" << options_.id; + RTC_LOG(LS_INFO) << "Created emulated endpoint " << options_.log_name + << "; id=" << options_.id; } EmulatedEndpointImpl::~EmulatedEndpointImpl() = default; @@ -546,13 +546,13 @@ absl::optional EmulatedEndpointImpl::BindReceiverInternal( bool result = port_to_receiver_.insert({port, {receiver, is_one_shot}}).second; if (!result) { - RTC_LOG(INFO) << "Can't bind receiver to used port " << desired_port - << " in endpoint " << options_.log_name - << "; id=" << options_.id; + RTC_LOG(LS_INFO) << "Can't bind receiver to used port " << desired_port + << " in endpoint " << options_.log_name + << "; id=" << options_.id; return absl::nullopt; } - RTC_LOG(INFO) << "New receiver is binded to endpoint " << options_.log_name - << "; id=" << options_.id << " on port " << port; + RTC_LOG(LS_INFO) << "New receiver is binded to endpoint " << options_.log_name + << "; id=" << options_.id << " on port " << port; return port; } @@ -568,8 +568,9 @@ uint16_t EmulatedEndpointImpl::NextPort() { void EmulatedEndpointImpl::UnbindReceiver(uint16_t port) { MutexLock lock(&receiver_lock_); - RTC_LOG(INFO) << "Receiver is removed on port " << port << " from endpoint " - << options_.log_name << "; id=" << options_.id; + RTC_LOG(LS_INFO) << "Receiver is removed on port " << port + << " from endpoint " 
<< options_.log_name + << "; id=" << options_.id; port_to_receiver_.erase(port); } @@ -579,15 +580,15 @@ void EmulatedEndpointImpl::BindDefaultReceiver( RTC_CHECK(!default_receiver_.has_value()) << "Endpoint " << options_.log_name << "; id=" << options_.id << " already has default receiver"; - RTC_LOG(INFO) << "Default receiver is binded to endpoint " - << options_.log_name << "; id=" << options_.id; + RTC_LOG(LS_INFO) << "Default receiver is binded to endpoint " + << options_.log_name << "; id=" << options_.id; default_receiver_ = receiver; } void EmulatedEndpointImpl::UnbindDefaultReceiver() { MutexLock lock(&receiver_lock_); - RTC_LOG(INFO) << "Default receiver is removed from endpoint " - << options_.log_name << "; id=" << options_.id; + RTC_LOG(LS_INFO) << "Default receiver is removed from endpoint " + << options_.log_name << "; id=" << options_.id; default_receiver_ = absl::nullopt; } @@ -616,9 +617,9 @@ void EmulatedEndpointImpl::OnPacketReceived(EmulatedIpPacket packet) { // It can happen, that remote peer closed connection, but there still some // packets, that are going to it. It can happen during peer connection close // process: one peer closed connection, second still sending data. - RTC_LOG(INFO) << "Drop packet: no receiver registered in " - << options_.log_name << "; id=" << options_.id << " on port " - << packet.to.port(); + RTC_LOG(LS_INFO) << "Drop packet: no receiver registered in " + << options_.log_name << "; id=" << options_.id + << " on port " << packet.to.port(); stats_builder_.OnPacketDropped(packet.from.ipaddr(), DataSize::Bytes(packet.ip_packet_size()), options_.stats_gathering_mode); diff --git a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc index e7b7f3f4c7..9af65e24be 100644 --- a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc +++ b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc @@ -39,13 +39,13 @@ constexpr absl::string_view kSkipRenderedFrameReasonDropped = "considered dropped"; void LogFrameCounters(const std::string& name, const FrameCounters& counters) { - RTC_LOG(INFO) << "[" << name << "] Captured : " << counters.captured; - RTC_LOG(INFO) << "[" << name << "] Pre encoded : " << counters.pre_encoded; - RTC_LOG(INFO) << "[" << name << "] Encoded : " << counters.encoded; - RTC_LOG(INFO) << "[" << name << "] Received : " << counters.received; - RTC_LOG(INFO) << "[" << name << "] Decoded : " << counters.decoded; - RTC_LOG(INFO) << "[" << name << "] Rendered : " << counters.rendered; - RTC_LOG(INFO) << "[" << name << "] Dropped : " << counters.dropped; + RTC_LOG(LS_INFO) << "[" << name << "] Captured : " << counters.captured; + RTC_LOG(LS_INFO) << "[" << name << "] Pre encoded : " << counters.pre_encoded; + RTC_LOG(LS_INFO) << "[" << name << "] Encoded : " << counters.encoded; + RTC_LOG(LS_INFO) << "[" << name << "] Received : " << counters.received; + RTC_LOG(LS_INFO) << "[" << name << "] Decoded : " << counters.decoded; + RTC_LOG(LS_INFO) << "[" << name << "] Rendered : " << counters.rendered; + RTC_LOG(LS_INFO) << "[" << name << "] Dropped : " << counters.dropped; } absl::string_view ToString(FrameDropPhase phase) { @@ -67,8 +67,8 @@ void LogStreamInternalStats(const std::string& name, const StreamStats& stats, Timestamp start_time) { for (const auto& entry : stats.dropped_by_phase) { - RTC_LOG(INFO) << "[" << name << "] Dropped at " << ToString(entry.first) - << ": " << entry.second; + RTC_LOG(LS_INFO) << "[" << name << "] Dropped at " << 
ToString(entry.first) + << ": " << entry.second; } Timestamp first_encoded_frame_time = Timestamp::PlusInfinity(); for (const StreamCodecInfo& encoder : stats.encoders) { @@ -77,32 +77,32 @@ void LogStreamInternalStats(const std::string& name, if (first_encoded_frame_time.IsInfinite()) { first_encoded_frame_time = encoder.switched_on_at; } - RTC_LOG(INFO) << "[" << name << "] Used encoder: \"" << encoder.codec_name - << "\" used from (frame_id=" << encoder.first_frame_id - << "; from_stream_start=" - << (encoder.switched_on_at - stats.stream_started_time).ms() - << "ms, from_call_start=" - << (encoder.switched_on_at - start_time).ms() - << "ms) until (frame_id=" << encoder.last_frame_id - << "; from_stream_start=" - << (encoder.switched_from_at - stats.stream_started_time).ms() - << "ms, from_call_start=" - << (encoder.switched_from_at - start_time).ms() << "ms)"; + RTC_LOG(LS_INFO) + << "[" << name << "] Used encoder: \"" << encoder.codec_name + << "\" used from (frame_id=" << encoder.first_frame_id + << "; from_stream_start=" + << (encoder.switched_on_at - stats.stream_started_time).ms() + << "ms, from_call_start=" << (encoder.switched_on_at - start_time).ms() + << "ms) until (frame_id=" << encoder.last_frame_id + << "; from_stream_start=" + << (encoder.switched_from_at - stats.stream_started_time).ms() + << "ms, from_call_start=" + << (encoder.switched_from_at - start_time).ms() << "ms)"; } for (const StreamCodecInfo& decoder : stats.decoders) { RTC_DCHECK(decoder.switched_on_at.IsFinite()); RTC_DCHECK(decoder.switched_from_at.IsFinite()); - RTC_LOG(INFO) << "[" << name << "] Used decoder: \"" << decoder.codec_name - << "\" used from (frame_id=" << decoder.first_frame_id - << "; from_stream_start=" - << (decoder.switched_on_at - stats.stream_started_time).ms() - << "ms, from_call_start=" - << (decoder.switched_on_at - start_time).ms() - << "ms) until (frame_id=" << decoder.last_frame_id - << "; from_stream_start=" - << (decoder.switched_from_at - stats.stream_started_time).ms() - << "ms, from_call_start=" - << (decoder.switched_from_at - start_time).ms() << "ms)"; + RTC_LOG(LS_INFO) + << "[" << name << "] Used decoder: \"" << decoder.codec_name + << "\" used from (frame_id=" << decoder.first_frame_id + << "; from_stream_start=" + << (decoder.switched_on_at - stats.stream_started_time).ms() + << "ms, from_call_start=" << (decoder.switched_on_at - start_time).ms() + << "ms) until (frame_id=" << decoder.last_frame_id + << "; from_stream_start=" + << (decoder.switched_from_at - stats.stream_started_time).ms() + << "ms, from_call_start=" + << (decoder.switched_from_at - start_time).ms() << "ms)"; } } @@ -668,8 +668,8 @@ std::set DefaultVideoQualityAnalyzer::GetKnownVideoStreams() const { MutexLock lock(&mutex_); std::set out; for (auto& item : frames_comparator_.stream_stats()) { - RTC_LOG(INFO) << item.first.ToString() << " ==> " - << ToStatsKey(item.first).ToString(); + RTC_LOG(LS_INFO) << item.first.ToString() << " ==> " + << ToStatsKey(item.first).ToString(); out.insert(ToStatsKey(item.first)); } return out; @@ -722,17 +722,19 @@ void DefaultVideoQualityAnalyzer::ReportResults() { start_time_); } if (!analyzer_stats_.comparisons_queue_size.IsEmpty()) { - RTC_LOG(INFO) << "comparisons_queue_size min=" - << analyzer_stats_.comparisons_queue_size.GetMin() - << "; max=" << analyzer_stats_.comparisons_queue_size.GetMax() - << "; 99%=" - << analyzer_stats_.comparisons_queue_size.GetPercentile(0.99); + RTC_LOG(LS_INFO) << "comparisons_queue_size min=" + << 
analyzer_stats_.comparisons_queue_size.GetMin() + << "; max=" + << analyzer_stats_.comparisons_queue_size.GetMax() + << "; 99%=" + << analyzer_stats_.comparisons_queue_size.GetPercentile( + 0.99); } - RTC_LOG(INFO) << "comparisons_done=" << analyzer_stats_.comparisons_done; - RTC_LOG(INFO) << "cpu_overloaded_comparisons_done=" - << analyzer_stats_.cpu_overloaded_comparisons_done; - RTC_LOG(INFO) << "memory_overloaded_comparisons_done=" - << analyzer_stats_.memory_overloaded_comparisons_done; + RTC_LOG(LS_INFO) << "comparisons_done=" << analyzer_stats_.comparisons_done; + RTC_LOG(LS_INFO) << "cpu_overloaded_comparisons_done=" + << analyzer_stats_.cpu_overloaded_comparisons_done; + RTC_LOG(LS_INFO) << "memory_overloaded_comparisons_done=" + << analyzer_stats_.memory_overloaded_comparisons_done; } void DefaultVideoQualityAnalyzer::ReportResults( diff --git a/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc b/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc index 4808cf40ff..669fcfe087 100644 --- a/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc +++ b/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc @@ -66,7 +66,7 @@ void ExampleVideoQualityAnalyzer::OnFrameEncoded( void ExampleVideoQualityAnalyzer::OnFrameDropped( absl::string_view peer_name, webrtc::EncodedImageCallback::DropReason reason) { - RTC_LOG(INFO) << "Frame dropped by encoder"; + RTC_LOG(LS_INFO) << "Frame dropped by encoder"; MutexLock lock(&lock_); ++frames_dropped_; } @@ -112,8 +112,8 @@ void ExampleVideoQualityAnalyzer::OnDecoderError(absl::string_view peer_name, void ExampleVideoQualityAnalyzer::Stop() { MutexLock lock(&lock_); - RTC_LOG(INFO) << "There are " << frames_in_flight_.size() - << " frames in flight, assuming all of them are dropped"; + RTC_LOG(LS_INFO) << "There are " << frames_in_flight_.size() + << " frames in flight, assuming all of them are dropped"; frames_dropped_ += frames_in_flight_.size(); } diff --git a/test/pc/e2e/echo/echo_emulation.cc b/test/pc/e2e/echo/echo_emulation.cc index 230e8e3eca..f2b4be9e0d 100644 --- a/test/pc/e2e/echo/echo_emulation.cc +++ b/test/pc/e2e/echo/echo_emulation.cc @@ -57,7 +57,7 @@ void EchoEmulatingCapturer::OnAudioRendered( } queue_input_.assign(data.begin(), data.end()); if (!renderer_queue_.Insert(&queue_input_)) { - RTC_LOG(WARNING) << "Echo queue is full"; + RTC_LOG(LS_WARNING) << "Echo queue is full"; } } diff --git a/test/pc/e2e/media/media_helper.cc b/test/pc/e2e/media/media_helper.cc index 6b1996adaa..c90b2e52bf 100644 --- a/test/pc/e2e/media/media_helper.cc +++ b/test/pc/e2e/media/media_helper.cc @@ -67,8 +67,8 @@ MediaHelper::MaybeAddVideo(TestPeer* peer) { rtc::make_ref_counted( std::move(capturer), is_screencast); out.push_back(source); - RTC_LOG(INFO) << "Adding video with video_config.stream_label=" - << video_config.stream_label.value(); + RTC_LOG(LS_INFO) << "Adding video with video_config.stream_label=" + << video_config.stream_label.value(); rtc::scoped_refptr track = peer->pc_factory()->CreateVideoTrack(video_config.stream_label.value(), source); diff --git a/test/pc/e2e/peer_connection_quality_test.cc b/test/pc/e2e/peer_connection_quality_test.cc index f9a129448f..4e9368fec9 100644 --- a/test/pc/e2e/peer_connection_quality_test.cc +++ b/test/pc/e2e/peer_connection_quality_test.cc @@ -202,17 +202,18 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { test::ScopedFieldTrials field_trials(GetFieldTrials(run_params)); // Print test summary - RTC_LOG(INFO) << "Media quality test: " << 
*alice_configurer->params()->name - << " will make a call to " << *bob_configurer->params()->name - << " with media video=" - << !alice_configurer->params()->video_configs.empty() - << "; audio=" - << alice_configurer->params()->audio_config.has_value() << ". " - << *bob_configurer->params()->name - << " will respond with media video=" - << !bob_configurer->params()->video_configs.empty() - << "; audio=" - << bob_configurer->params()->audio_config.has_value(); + RTC_LOG(LS_INFO) << "Media quality test: " + << *alice_configurer->params()->name + << " will make a call to " << *bob_configurer->params()->name + << " with media video=" + << !alice_configurer->params()->video_configs.empty() + << "; audio=" + << alice_configurer->params()->audio_config.has_value() + << ". " << *bob_configurer->params()->name + << " will respond with media video=" + << !bob_configurer->params()->video_configs.empty() + << "; audio=" + << bob_configurer->params()->audio_config.has_value(); const std::unique_ptr signaling_thread = time_controller_.CreateThread(kSignalThreadName); @@ -276,7 +277,7 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { } video_analyzer_threads = std::min(video_analyzer_threads, kMaxVideoAnalyzerThreads); - RTC_LOG(INFO) << "video_analyzer_threads=" << video_analyzer_threads; + RTC_LOG(LS_INFO) << "video_analyzer_threads=" << video_analyzer_threads; quality_metrics_reporters_.push_back( std::make_unique( time_controller_.GetClock())); @@ -315,8 +316,8 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { return kAliveMessageLogInterval; }); - RTC_LOG(INFO) << "Configuration is done. Now " << *alice_->params()->name - << " is calling to " << *bob_->params()->name << "..."; + RTC_LOG(LS_INFO) << "Configuration is done. Now " << *alice_->params()->name + << " is calling to " << *bob_->params()->name << "..."; // Setup stats poller. std::vector observers = { @@ -359,7 +360,7 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { time_controller_.AdvanceTime(run_params.run_duration); } - RTC_LOG(INFO) << "Test is done, initiating disconnect sequence."; + RTC_LOG(LS_INFO) << "Test is done, initiating disconnect sequence."; // Stop all client started tasks to prevent their access to any call related // objects after these objects will be destroyed during call tear down. 
@@ -382,7 +383,7 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { [this] { TearDownCallOnSignalingThread(); }); Timestamp end_time = Now(); - RTC_LOG(INFO) << "All peers are disconnected."; + RTC_LOG(LS_INFO) << "All peers are disconnected."; { MutexLock lock(&lock_); real_test_duration_ = end_time - start_time; @@ -619,13 +620,13 @@ void PeerConnectionE2EQualityTest::ExchangeOfferAnswer( auto offer = alice_->CreateOffer(); RTC_CHECK(offer); offer->ToString(&log_output); - RTC_LOG(INFO) << "Original offer: " << log_output; + RTC_LOG(LS_INFO) << "Original offer: " << log_output; LocalAndRemoteSdp patch_result = signaling_interceptor->PatchOffer( std::move(offer), alice_->params()->video_codecs[0]); patch_result.local_sdp->ToString(&log_output); - RTC_LOG(INFO) << "Offer to set as local description: " << log_output; + RTC_LOG(LS_INFO) << "Offer to set as local description: " << log_output; patch_result.remote_sdp->ToString(&log_output); - RTC_LOG(INFO) << "Offer to set as remote description: " << log_output; + RTC_LOG(LS_INFO) << "Offer to set as remote description: " << log_output; bool set_local_offer = alice_->SetLocalDescription(std::move(patch_result.local_sdp)); @@ -636,13 +637,13 @@ void PeerConnectionE2EQualityTest::ExchangeOfferAnswer( auto answer = bob_->CreateAnswer(); RTC_CHECK(answer); answer->ToString(&log_output); - RTC_LOG(INFO) << "Original answer: " << log_output; + RTC_LOG(LS_INFO) << "Original answer: " << log_output; patch_result = signaling_interceptor->PatchAnswer( std::move(answer), bob_->params()->video_codecs[0]); patch_result.local_sdp->ToString(&log_output); - RTC_LOG(INFO) << "Answer to set as local description: " << log_output; + RTC_LOG(LS_INFO) << "Answer to set as local description: " << log_output; patch_result.remote_sdp->ToString(&log_output); - RTC_LOG(INFO) << "Answer to set as remote description: " << log_output; + RTC_LOG(LS_INFO) << "Answer to set as remote description: " << log_output; bool set_local_answer = bob_->SetLocalDescription(std::move(patch_result.local_sdp)); @@ -661,9 +662,9 @@ void PeerConnectionE2EQualityTest::ExchangeIceCandidates( for (auto& candidate : alice_candidates) { std::string candidate_str; RTC_CHECK(candidate->ToString(&candidate_str)); - RTC_LOG(INFO) << *alice_->params()->name - << " ICE candidate(mid= " << candidate->sdp_mid() - << "): " << candidate_str; + RTC_LOG(LS_INFO) << *alice_->params()->name + << " ICE candidate(mid= " << candidate->sdp_mid() + << "): " << candidate_str; } ASSERT_TRUE(bob_->AddIceCandidates(std::move(alice_candidates))); std::vector> bob_candidates = @@ -672,9 +673,9 @@ void PeerConnectionE2EQualityTest::ExchangeIceCandidates( for (auto& candidate : bob_candidates) { std::string candidate_str; RTC_CHECK(candidate->ToString(&candidate_str)); - RTC_LOG(INFO) << *bob_->params()->name - << " ICE candidate(mid= " << candidate->sdp_mid() - << "): " << candidate_str; + RTC_LOG(LS_INFO) << *bob_->params()->name + << " ICE candidate(mid= " << candidate->sdp_mid() + << "): " << candidate_str; } ASSERT_TRUE(alice_->AddIceCandidates(std::move(bob_candidates))); } diff --git a/test/pc/e2e/stats_based_network_quality_metrics_reporter.cc b/test/pc/e2e/stats_based_network_quality_metrics_reporter.cc index 1ba31a3e32..94a46fbae5 100644 --- a/test/pc/e2e/stats_based_network_quality_metrics_reporter.cc +++ b/test/pc/e2e/stats_based_network_quality_metrics_reporter.cc @@ -378,7 +378,7 @@ void StatsBasedNetworkQualityMetricsReporter::LogNetworkLayerStats( } } - RTC_LOG(INFO) << log.str(); + 
RTC_LOG(LS_INFO) << log.str(); } } // namespace webrtc_pc_e2e diff --git a/video/video_quality_test.cc b/video/video_quality_test.cc index b77a4759a2..ac565cc2de 100644 --- a/video/video_quality_test.cc +++ b/video/video_quality_test.cc @@ -1342,7 +1342,7 @@ void VideoQualityTest::RunWithAnalyzer(const Params& params) { rtc::scoped_refptr VideoQualityTest::CreateAudioDevice() { #ifdef WEBRTC_WIN - RTC_LOG(INFO) << "Using latest version of ADM on Windows"; + RTC_LOG(LS_INFO) << "Using latest version of ADM on Windows"; // We must initialize the COM library on a thread before we calling any of // the library functions. All COM functions in the ADM will return // CO_E_NOTINITIALIZED otherwise. The legacy ADM for Windows used internal @@ -1433,7 +1433,7 @@ void VideoQualityTest::SetupAudio(Transport* transport) { } void VideoQualityTest::RunWithRenderers(const Params& params) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; num_video_streams_ = params.call.dual_video ? 2 : 1; std::unique_ptr send_transport; std::unique_ptr recv_transport; diff --git a/video/video_source_sink_controller.cc b/video/video_source_sink_controller.cc index 6955e3b1e7..810a4ff1f5 100644 --- a/video/video_source_sink_controller.cc +++ b/video/video_source_sink_controller.cc @@ -80,7 +80,8 @@ void VideoSourceSinkController::PushSourceSinkSettings() { if (!source_) return; rtc::VideoSinkWants wants = CurrentSettingsToSinkWants(); - RTC_LOG(INFO) << "Pushing SourceSink restrictions: " << WantsToString(wants); + RTC_LOG(LS_INFO) << "Pushing SourceSink restrictions: " + << WantsToString(wants); source_->AddOrUpdateSink(sink_, wants); } diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc index 1916a59bd5..7e0e7fc63e 100644 --- a/video/video_stream_encoder.cc +++ b/video/video_stream_encoder.cc @@ -2173,9 +2173,9 @@ void VideoStreamEncoder::OnVideoSourceRestrictionsUpdated( rtc::scoped_refptr reason, const VideoSourceRestrictions& unfiltered_restrictions) { RTC_DCHECK_RUN_ON(&encoder_queue_); - RTC_LOG(INFO) << "Updating sink restrictions from " - << (reason ? reason->Name() : std::string("")) << " to " - << restrictions.ToString(); + RTC_LOG(LS_INFO) << "Updating sink restrictions from " + << (reason ? 
reason->Name() : std::string("")) + << " to " << restrictions.ToString(); worker_queue_->PostTask(ToQueuedTask( task_safety_, [this, restrictions = std::move(restrictions)]() { RTC_DCHECK_RUN_ON(worker_queue_); diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc index ca1a527b3d..b41b4b0d1d 100644 --- a/video/video_stream_encoder_unittest.cc +++ b/video/video_stream_encoder_unittest.cc @@ -510,9 +510,9 @@ class AdaptingFrameForwarder : public test::FrameForwarder { int out_width = 0; int out_height = 0; if (adaption_enabled()) { - RTC_DLOG(INFO) << "IncomingCapturedFrame: AdaptFrameResolution()" - << "w=" << video_frame.width() - << "h=" << video_frame.height(); + RTC_DLOG(LS_INFO) << "IncomingCapturedFrame: AdaptFrameResolution()" + << "w=" << video_frame.width() + << "h=" << video_frame.height(); if (adapter_.AdaptFrameResolution( video_frame.width(), video_frame.height(), video_frame.timestamp_us() * 1000, &cropped_width, @@ -540,7 +540,7 @@ class AdaptingFrameForwarder : public test::FrameForwarder { last_height_ = absl::nullopt; } } else { - RTC_DLOG(INFO) << "IncomingCapturedFrame: adaptation not enabled"; + RTC_DLOG(LS_INFO) << "IncomingCapturedFrame: adaptation not enabled"; test::FrameForwarder::IncomingCapturedFrame(video_frame); last_width_.emplace(video_frame.width()); last_height_.emplace(video_frame.height()); @@ -1352,7 +1352,7 @@ class VideoStreamEncoderTest : public ::testing::Test { } log << "]"; } - RTC_DLOG(INFO) << "OnVideoLayersAllocationUpdated " << log.str(); + RTC_DLOG(LS_INFO) << "OnVideoLayersAllocationUpdated " << log.str(); } TimeController* const time_controller_;