From 64ce699f4bcfa61c9de97031553baca6257196e6 Mon Sep 17 00:00:00 2001
From: Tony Herre
Date: Fri, 27 Jan 2023 13:25:32 +0100
Subject: [PATCH] Propagate Video CSRCs modified by an insertable streams
 frame transform
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allow CSRCs to be modified per-frame in an Encoded Insertable Streams
transform, to support a web API which allows per-frame CSRC modifications,
signalling when a JS application has changed the source of the video
written into an encoded frame.

Initially only for Video, with Audio support likely to follow later.

Bug: webrtc:14709
Change-Id: Ib34f35faa9cee56216b30eaae42d7e65c78bb9f2
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/291324
Reviewed-by: Harald Alvestrand
Reviewed-by: Tove Petersson
Reviewed-by: Henrik Boström
Commit-Queue: Tony Herre
Cr-Commit-Position: refs/heads/main@{#39214}
---
 api/video/video_frame_metadata.cc             |   8 ++
 api/video/video_frame_metadata.h              |   6 ++
 .../frame_transformer_factory_unittest.cc     |  10 ++
 modules/rtp_rtcp/source/nack_rtx_unittest.cc  |   4 +-
 .../source/rtp_rtcp_impl2_unittest.cc         |   2 +-
 .../rtp_rtcp/source/rtp_rtcp_impl_unittest.cc |   2 +-
 modules/rtp_rtcp/source/rtp_sender.cc         |   6 ++
 modules/rtp_rtcp/source/rtp_sender.h          |   1 +
 .../rtp_rtcp/source/rtp_sender_unittest.cc    |   4 +-
 modules/rtp_rtcp/source/rtp_sender_video.cc   |   9 +-
 modules/rtp_rtcp/source/rtp_sender_video.h    |   3 +-
 ...sender_video_frame_transformer_delegate.cc |  28 +++--
 ..._sender_video_frame_transformer_delegate.h |   3 +
 .../source/rtp_sender_video_unittest.cc       | 100 +++++++++---------
 14 files changed, 117 insertions(+), 69 deletions(-)

diff --git a/api/video/video_frame_metadata.cc b/api/video/video_frame_metadata.cc
index e1863e9c13..6efb6ad875 100644
--- a/api/video/video_frame_metadata.cc
+++ b/api/video/video_frame_metadata.cc
@@ -136,4 +136,12 @@ void VideoFrameMetadata::SetRTPVideoHeaderCodecSpecifics(
   codec_specifics_ = std::move(codec_specifics);
 }
 
+std::vector<uint32_t> VideoFrameMetadata::GetCsrcs() const {
+  return csrcs_;
+}
+
+void VideoFrameMetadata::SetCsrcs(std::vector<uint32_t> csrcs) {
+  csrcs_ = std::move(csrcs);
+}
+
 }  // namespace webrtc
diff --git a/api/video/video_frame_metadata.h b/api/video/video_frame_metadata.h
index 2703f11324..f8f144e9b9 100644
--- a/api/video/video_frame_metadata.h
+++ b/api/video/video_frame_metadata.h
@@ -12,6 +12,7 @@
 #define API_VIDEO_VIDEO_FRAME_METADATA_H_
 
 #include <cstdint>
+#include <vector>
 
 #include "absl/container/inlined_vector.h"
 #include "absl/types/optional.h"
@@ -88,6 +89,9 @@ class RTC_EXPORT VideoFrameMetadata {
   void SetRTPVideoHeaderCodecSpecifics(
       RTPVideoHeaderCodecSpecifics codec_specifics);
 
+  std::vector<uint32_t> GetCsrcs() const;
+  void SetCsrcs(std::vector<uint32_t> csrcs);
+
  private:
   VideoFrameType frame_type_ = VideoFrameType::kEmptyFrame;
   int16_t width_ = 0;
@@ -106,6 +110,8 @@ class RTC_EXPORT VideoFrameMetadata {
   uint8_t simulcast_idx_ = 0;
   VideoCodecType codec_ = VideoCodecType::kVideoCodecGeneric;
   RTPVideoHeaderCodecSpecifics codec_specifics_;
+
+  std::vector<uint32_t> csrcs_;
 };
 
 }  // namespace webrtc
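With the accessors above, a sender-side frame transformer can rewrite the
contributing sources of every frame it handles. A minimal sketch of such a
transform follows; the class name, the callback bookkeeping and the CSRC
values are illustrative only, while FrameTransformerInterface,
TransformableVideoFrameInterface and VideoFrameMetadata are the existing
APIs used by this change:

  #include <memory>
  #include <utility>

  #include "api/frame_transformer_interface.h"
  #include "api/video/video_frame_metadata.h"
  #include "rtc_base/synchronization/mutex.h"

  // Overwrites the CSRCs of each outgoing video frame before packetization.
  // Assumes it is only registered on a video sender, so every frame received
  // in Transform() is a TransformableVideoFrameInterface.
  class CsrcRewritingFrameTransformer
      : public webrtc::FrameTransformerInterface {
   public:
    void Transform(
        std::unique_ptr<webrtc::TransformableFrameInterface> frame) override {
      auto* video_frame =
          static_cast<webrtc::TransformableVideoFrameInterface*>(frame.get());
      webrtc::VideoFrameMetadata metadata = video_frame->GetMetadata();
      metadata.SetCsrcs({0x11111111, 0x22222222});  // Placeholder sources.
      video_frame->SetMetadata(metadata);
      webrtc::MutexLock lock(&mutex_);
      if (callback_)
        callback_->OnTransformedFrame(std::move(frame));
    }
    void RegisterTransformedFrameSinkCallback(
        rtc::scoped_refptr<webrtc::TransformedFrameCallback> callback,
        uint32_t /*ssrc*/) override {
      webrtc::MutexLock lock(&mutex_);
      callback_ = std::move(callback);
    }
    void UnregisterTransformedFrameSinkCallback(uint32_t /*ssrc*/) override {
      webrtc::MutexLock lock(&mutex_);
      callback_ = nullptr;
    }

   private:
    webrtc::Mutex mutex_;
    rtc::scoped_refptr<webrtc::TransformedFrameCallback> callback_;
  };

The transformed frame is handed back to the sender's
RTPSenderVideoFrameTransformerDelegate, which re-enters
RTPSenderVideo::SendVideo() with the frame's metadata CSRCs, as wired up in
the changes below.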
diff --git a/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc b/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc
index 65a0e4cbb4..1a78c6deaa 100644
--- a/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc
+++ b/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc
@@ -29,6 +29,7 @@ namespace {
 
 using testing::NiceMock;
 using testing::Return;
+using testing::ReturnRef;
 
 class MockTransformableVideoFrame
     : public webrtc::TransformableVideoFrameInterface {
@@ -60,9 +61,18 @@ TEST(FrameTransformerFactory, CloneVideoFrame) {
   std::fill_n(data, 10, 5);
   rtc::ArrayView<uint8_t> data_view(data);
   EXPECT_CALL(original_frame, GetData()).WillRepeatedly(Return(data_view));
+  webrtc::VideoFrameMetadata metadata;
+  std::vector<uint32_t> csrcs{123, 321};
+  // Copy csrcs rather than moving so we can compare in an EXPECT_EQ later.
+  metadata.SetCsrcs(csrcs);
+
+  EXPECT_CALL(original_frame, GetMetadata())
+      .WillRepeatedly(ReturnRef(metadata));
   auto cloned_frame = CloneVideoFrame(&original_frame);
 
+  EXPECT_EQ(cloned_frame->GetData().size(), 10u);
   EXPECT_THAT(cloned_frame->GetData(), testing::Each(5u));
+  EXPECT_EQ(cloned_frame->GetMetadata().GetCsrcs(), csrcs);
 }
 
 }  // namespace
diff --git a/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index d0617f3804..87c6e661dc 100644
--- a/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -210,7 +210,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
     video_header.frame_type = VideoFrameType::kVideoFrameDelta;
     EXPECT_TRUE(rtp_sender_video_->SendVideo(
         kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
-        timestamp / 90, payload_data, video_header, 0));
+        timestamp / 90, payload_data, video_header, 0, {}));
     // Min required delay until retransmit = 5 + RTT ms (RTT = 0).
     fake_clock.AdvanceTimeMilliseconds(5);
     int length = BuildNackList(nack_list);
@@ -260,7 +260,7 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) {
     video_header.frame_type = VideoFrameType::kVideoFrameDelta;
     EXPECT_TRUE(rtp_sender_video_->SendVideo(
         kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
-        timestamp / 90, payload_data, video_header, 0));
+        timestamp / 90, payload_data, video_header, 0, {}));
     // Prepare next frame.
     timestamp += 3000;
     fake_clock.AdvanceTimeMilliseconds(33);
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
index 918e075be8..b793ba82a5 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
@@ -358,7 +358,7 @@ class RtpRtcpImpl2Test : public ::testing::Test {
 
     success &= sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
                                  rtp_timestamp, capture_time_ms, payload,
-                                 rtp_video_header, 0);
+                                 rtp_video_header, 0, {});
     return success;
   }
 
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 96bef23a3a..4c621d70cf 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -228,7 +228,7 @@ class RtpRtcpImplTest : public ::testing::Test {
     const uint8_t payload[100] = {0};
     EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, kPayloadType, true));
     EXPECT_TRUE(sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
-                                  0, 0, payload, rtp_video_header, 0));
+                                  0, 0, payload, rtp_video_header, 0, {}));
   }
 
   void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index ac3bd55e05..9b977face0 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -521,6 +521,7 @@ std::unique_ptr<RtpPacketToSend> RTPSender::AllocatePacket() const {
       &rtp_header_extension_map_, max_packet_size_ + kExtraCapacity);
   packet->SetSsrc(ssrc_);
   packet->SetCsrcs(csrcs_);
+
   // Reserve extensions, if registered, RtpSender set in SendToNetwork.
   packet->ReserveExtension<AbsoluteSendTime>();
   packet->ReserveExtension<TransmissionOffset>();
@@ -582,6 +583,11 @@ void RTPSender::SetMid(absl::string_view mid) {
   UpdateHeaderSizes();
 }
 
+std::vector<uint32_t> RTPSender::Csrcs() const {
+  MutexLock lock(&send_mutex_);
+  return csrcs_;
+}
+
 void RTPSender::SetCsrcs(const std::vector<uint32_t>& csrcs) {
   RTC_DCHECK_LE(csrcs.size(), kRtpCsrcSize);
   MutexLock lock(&send_mutex_);
diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h
index 55dee7f219..c45f597951 100644
--- a/modules/rtp_rtcp/source/rtp_sender.h
+++ b/modules/rtp_rtcp/source/rtp_sender.h
@@ -63,6 +63,7 @@ class RTPSender {
   uint16_t SequenceNumber() const RTC_LOCKS_EXCLUDED(send_mutex_);
   void SetSequenceNumber(uint16_t seq) RTC_LOCKS_EXCLUDED(send_mutex_);
 
+  std::vector<uint32_t> Csrcs() const;
   void SetCsrcs(const std::vector<uint32_t>& csrcs)
       RTC_LOCKS_EXCLUDED(send_mutex_);
 
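The new Csrcs() accessor mirrors the existing SetCsrcs(), so per-frame code
can snapshot the currently configured list and reuse it. A small fragment,
assuming an already-configured RTPSender* named rtp_sender and placeholder
values:

  std::vector<uint32_t> configured = rtp_sender->Csrcs();  // Snapshot.
  rtp_sender->SetCsrcs({0x11111111});  // Applied to packets allocated next.
  // ... packetize and send one frame ...
  rtp_sender->SetCsrcs(configured);    // Restore for subsequent frames.

This is the pattern RTPSenderVideo relies on below: the list read at
construction time seeds the frame-transformer delegate, and SendVideo()
re-applies whatever list arrives with each frame.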
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 432c5e72d3..ff6a3725fa 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -1348,7 +1348,7 @@ TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) {
     EXPECT_TRUE(rtp_sender_video.SendVideo(
         kPayloadType, kCodecType,
         capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
-        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs));
+        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs, {}));
     time_controller_.AdvanceTime(TimeDelta::Millis(33));
   }
 
@@ -1364,7 +1364,7 @@ TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) {
     EXPECT_TRUE(rtp_sender_video.SendVideo(
         kPayloadType, kCodecType,
         capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
-        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs));
+        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs, {}));
     time_controller_.AdvanceTime(TimeDelta::Millis(33));
   }
 
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index e1ac4e41c3..e1f1697047 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -171,6 +171,7 @@ RTPSenderVideo::RTPSenderVideo(const Config& config)
                 this,
                 config.frame_transformer,
                 rtp_sender_->SSRC(),
+                rtp_sender_->Csrcs(),
                 config.task_queue_factory)
           : nullptr),
       include_capture_clock_offset_(!absl::StartsWith(
@@ -474,7 +475,8 @@ bool RTPSenderVideo::SendVideo(
     int64_t capture_time_ms,
     rtc::ArrayView<const uint8_t> payload,
     RTPVideoHeader video_header,
-    absl::optional<int64_t> expected_retransmission_time_ms) {
+    absl::optional<int64_t> expected_retransmission_time_ms,
+    std::vector<uint32_t> csrcs) {
   TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms, "Send", "type",
                           FrameTypeToString(video_header.frame_type));
   RTC_CHECK_RUNS_SERIALIZED(&send_checker_);
@@ -484,6 +486,7 @@ bool RTPSenderVideo::SendVideo(
 
   if (payload.empty())
     return false;
+
   if (!rtp_sender_->SendingMedia()) {
     return false;
   }
@@ -530,6 +533,8 @@ bool RTPSenderVideo::SendVideo(
     capture_time = Timestamp::Millis(capture_time_ms);
   }
 
+  rtp_sender_->SetCsrcs(std::move(csrcs));
+
   std::unique_ptr<RtpPacketToSend> single_packet =
       rtp_sender_->AllocatePacket();
   RTC_DCHECK_LE(packet_capacity, single_packet->capacity());
@@ -778,7 +783,7 @@ bool RTPSenderVideo::SendEncodedImage(
   }
   return SendVideo(payload_type, codec_type, rtp_timestamp,
                    encoded_image.capture_time_ms_, encoded_image, video_header,
-                   expected_retransmission_time_ms);
+                   expected_retransmission_time_ms, rtp_sender_->Csrcs());
 }
 
 uint32_t RTPSenderVideo::PacketizationOverheadBps() const {
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h
index ecff8d42de..825209f246 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -98,7 +98,8 @@ class RTPSenderVideo {
                  int64_t capture_time_ms,
                  rtc::ArrayView<const uint8_t> payload,
                  RTPVideoHeader video_header,
-                 absl::optional<int64_t> expected_retransmission_time_ms);
+                 absl::optional<int64_t> expected_retransmission_time_ms,
+                 std::vector<uint32_t> csrcs = {});
 
   bool SendEncodedImage(
       int payload_type,
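Because the new csrcs parameter defaults to an empty list, existing callers
keep their behaviour; a caller that wants a frame's packets to carry
specific contributing sources passes them explicitly. Sketch of a call site
(rtp_sender_video and payload are assumed to exist already; the constants
are placeholders):

  RTPVideoHeader header;
  header.frame_type = VideoFrameType::kVideoFrameKey;
  rtp_sender_video.SendVideo(/*payload_type=*/96,
                             VideoCodecType::kVideoCodecVP8,
                             /*rtp_timestamp=*/90000,
                             /*capture_time_ms=*/0, payload, header,
                             /*expected_retransmission_time_ms=*/absl::nullopt,
                             /*csrcs=*/{0x11111111, 0x22222222});

Inside SendVideo() the list is handed to rtp_sender_->SetCsrcs() before the
first packet of the frame is allocated, so every packet of that frame
carries it.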
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
index 27b6a17a20..ecf8aedf83 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
@@ -31,7 +31,8 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
       absl::optional<VideoCodecType> codec_type,
       uint32_t rtp_timestamp,
       absl::optional<int64_t> expected_retransmission_time_ms,
-      uint32_t ssrc)
+      uint32_t ssrc,
+      std::vector<uint32_t> csrcs)
       : encoded_data_(encoded_image.GetEncodedData()),
         header_(video_header),
         metadata_(header_.GetAsMetadata()),
@@ -44,6 +45,7 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
         ssrc_(ssrc) {
     RTC_DCHECK_GE(payload_type_, 0);
     RTC_DCHECK_LE(payload_type_, 127);
+    metadata_.SetCsrcs(std::move(csrcs));
   }
 
   ~TransformableVideoSenderFrame() override = default;
@@ -71,9 +73,12 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
   const VideoFrameMetadata& GetMetadata() const override { return metadata_; }
   void SetMetadata(const VideoFrameMetadata& metadata) override {
     header_.SetFromMetadata(metadata);
+    std::vector<uint32_t> csrcs = metadata.GetCsrcs();
+
     // We have to keep a local copy because GetMetadata() has to return a
     // reference.
     metadata_ = header_.GetAsMetadata();
+    metadata_.SetCsrcs(std::move(csrcs));
   }
 
   const RTPVideoHeader& GetHeader() const { return header_; }
@@ -109,10 +114,12 @@ RTPSenderVideoFrameTransformerDelegate::RTPSenderVideoFrameTransformerDelegate(
     RTPSenderVideo* sender,
     rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
    uint32_t ssrc,
+    std::vector<uint32_t> csrcs,
     TaskQueueFactory* task_queue_factory)
     : sender_(sender),
       frame_transformer_(std::move(frame_transformer)),
       ssrc_(ssrc),
+      csrcs_(csrcs),
       transformation_queue_(task_queue_factory->CreateTaskQueue(
           "video_frame_transformer",
           TaskQueueFactory::Priority::NORMAL)) {}
@@ -131,7 +138,7 @@ bool RTPSenderVideoFrameTransformerDelegate::TransformFrame(
     absl::optional<int64_t> expected_retransmission_time_ms) {
   frame_transformer_->Transform(std::make_unique<TransformableVideoSenderFrame>(
       encoded_image, video_header, payload_type, codec_type, rtp_timestamp,
-      expected_retransmission_time_ms, ssrc_));
+      expected_retransmission_time_ms, ssrc_, csrcs_));
   return true;
 }
 
@@ -160,13 +167,14 @@ void RTPSenderVideoFrameTransformerDelegate::SendVideo(
     return;
   auto* transformed_video_frame =
       static_cast<TransformableVideoSenderFrame*>(transformed_frame.get());
-  sender_->SendVideo(
-      transformed_video_frame->GetPayloadType(),
-      transformed_video_frame->GetCodecType(),
-      transformed_video_frame->GetTimestamp(),
-      transformed_video_frame->GetCaptureTimeMs(),
-      transformed_video_frame->GetData(), transformed_video_frame->GetHeader(),
-      transformed_video_frame->GetExpectedRetransmissionTimeMs());
+  sender_->SendVideo(transformed_video_frame->GetPayloadType(),
+                     transformed_video_frame->GetCodecType(),
+                     transformed_video_frame->GetTimestamp(),
+                     transformed_video_frame->GetCaptureTimeMs(),
+                     transformed_video_frame->GetData(),
+                     transformed_video_frame->GetHeader(),
+                     transformed_video_frame->GetExpectedRetransmissionTimeMs(),
+                     transformed_video_frame->GetMetadata().GetCsrcs());
 }
 
 void RTPSenderVideoFrameTransformerDelegate::SetVideoStructureUnderLock(
@@ -221,7 +229,7 @@ std::unique_ptr<TransformableVideoFrameInterface> CloneSenderVideoFrame(
       encoded_image, new_header, original->GetPayloadType(), new_codec_type,
       original->GetTimestamp(),
      absl::nullopt,  // expected_retransmission_time_ms
-      original->GetSsrc());
+      original->GetSsrc(), original->GetMetadata().GetCsrcs());
 }
 
 }  // namespace webrtc
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
index 55f7961e2d..085f29bf28 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
@@ -12,6 +12,7 @@
 #define MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_FRAME_TRANSFORMER_DELEGATE_H_
 
 #include <memory>
+#include <vector>
 
 #include "api/frame_transformer_interface.h"
 #include "api/scoped_refptr.h"
@@ -34,6 +35,7 @@ class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback {
       RTPSenderVideo* sender,
       rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
       uint32_t ssrc,
+      std::vector<uint32_t> csrcs,
       TaskQueueFactory* send_transport_queue);
 
   void Init();
@@ -80,6 +82,7 @@ class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback {
   RTPSenderVideo* sender_ RTC_GUARDED_BY(sender_lock_);
   rtc::scoped_refptr<FrameTransformerInterface> frame_transformer_;
   const uint32_t ssrc_;
+  std::vector<uint32_t> csrcs_;
   // Used when the encoded frames arrives without a current task queue. This can
   // happen if a hardware encoder was used.
   std::unique_ptr<TaskQueueBase, TaskQueueDeleter> transformation_queue_;
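Putting the pieces together: a transformer such as the sketch after the
commit message is installed through RTPSenderVideo::Config, and the delegate
declared above is constructed with rtp_sender->Csrcs(), so frames whose
metadata the transform leaves untouched keep the sender's configured CSRCs.
A rough wiring fragment (local names are illustrative and only the fields
relevant here are shown; config.frame_transformer and
config.task_queue_factory are the fields used in the constructor change
above, clock and rtp_sender are assumed to be valid pointers):

  RTPSenderVideo::Config video_config;
  video_config.clock = clock;              // webrtc::Clock*
  video_config.rtp_sender = rtp_sender;    // webrtc::RTPSender*
  video_config.task_queue_factory = task_queue_factory;
  video_config.frame_transformer =
      rtc::make_ref_counted<CsrcRewritingFrameTransformer>();
  RTPSenderVideo rtp_sender_video(video_config);

From then on each encoded image goes through TransformFrame() above, which
stamps the delegate's csrcs_ into the frame's metadata, and the transformed
result comes back through SendVideo() carrying GetMetadata().GetCsrcs().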
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
index 72dfd0238d..0cfa24c834 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
@@ -215,7 +215,7 @@ TEST_F(RtpSenderVideoTest, KeyFrameHasCVO) {
   hdr.rotation = kVideoRotation_0;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoRotation rotation;
   EXPECT_TRUE(
@@ -242,7 +242,7 @@ TEST_F(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) {
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp,
                                kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   VideoSendTiming timing;
   EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
       &timing));
@@ -261,13 +261,13 @@ TEST_F(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   EXPECT_TRUE(
       rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                                   kDefaultExpectedRetransmissionTimeMs));
+                                   kDefaultExpectedRetransmissionTimeMs, {}));
 
   hdr.rotation = kVideoRotation_0;
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  EXPECT_TRUE(
-      rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
-                                   hdr, kDefaultExpectedRetransmissionTimeMs));
+  EXPECT_TRUE(rtp_sender_video_->SendVideo(
+      kPayload, kType, kTimestamp + 1, 0, kFrame, hdr,
+      kDefaultExpectedRetransmissionTimeMs, {}));
 
   VideoRotation rotation;
   EXPECT_TRUE(
@@ -285,12 +285,12 @@ TEST_F(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   EXPECT_TRUE(
       rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                                   kDefaultExpectedRetransmissionTimeMs));
+                                   kDefaultExpectedRetransmissionTimeMs, {}));
 
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  EXPECT_TRUE(
-      rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
-                                   hdr, kDefaultExpectedRetransmissionTimeMs));
+  EXPECT_TRUE(rtp_sender_video_->SendVideo(
+      kPayload, kType, kTimestamp + 1, 0, kFrame, hdr,
+      kDefaultExpectedRetransmissionTimeMs, {}));
 
   VideoRotation rotation;
   EXPECT_TRUE(
@@ -529,7 +529,7 @@ TEST_F(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
                                     DecodeTargetIndication::kSwitch};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);
   DependencyDescriptor descriptor_key;
@@ -555,7 +555,7 @@ TEST_F(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
                                     DecodeTargetIndication::kRequired};
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   EXPECT_EQ(transport_.packets_sent(), 2);
   DependencyDescriptor descriptor_delta;
@@ -604,7 +604,7 @@ TEST_F(RtpSenderVideoTest,
                                     DecodeTargetIndication::kSwitch};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);
   DependencyDescriptor descriptor_key;
@@ -620,7 +620,7 @@ TEST_F(RtpSenderVideoTest,
                                     DecodeTargetIndication::kRequired};
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   EXPECT_EQ(transport_.packets_sent(), 2);
   EXPECT_FALSE(transport_.last_sent_packet()
                    .HasExtension<RtpDependencyDescriptorExtension>());
@@ -649,7 +649,7 @@ TEST_F(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) {
   generic.chain_diffs = {2};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);
   DependencyDescriptor descriptor_key;
@@ -684,7 +684,7 @@ TEST_F(RtpSenderVideoTest,
   generic.chain_diffs = {1};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);
   DependencyDescriptor descriptor_key;
@@ -722,7 +722,7 @@ TEST_F(RtpSenderVideoTest,
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SetVideoStructure(&video_structure1);
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   // Parse 1st extension.
   ASSERT_EQ(transport_.packets_sent(), 1);
   DependencyDescriptor descriptor_key1;
@@ -738,7 +738,7 @@ TEST_F(RtpSenderVideoTest,
                                     DecodeTargetIndication::kNotPresent};
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 2);
   RtpPacket delta_packet = transport_.last_sent_packet();
@@ -750,7 +750,7 @@ TEST_F(RtpSenderVideoTest,
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SetVideoStructure(&video_structure2);
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   // Parse the 2nd key frame.
   ASSERT_EQ(transport_.packets_sent(), 3);
   DependencyDescriptor descriptor_key2;
@@ -804,7 +804,7 @@ TEST_F(RtpSenderVideoTest,
   EXPECT_CALL(*encryptor,
              Encrypt(_, _, Not(IsEmpty()), ElementsAreArray(kFrame), _, _));
   rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                             kDefaultExpectedRetransmissionTimeMs);
+                             kDefaultExpectedRetransmissionTimeMs, {});
   // Double check packet with the dependency descriptor is sent.
   ASSERT_EQ(transport_.packets_sent(), 1);
   EXPECT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpDependencyDescriptorExtension>());
@@ -826,7 +826,7 @@ TEST_F(RtpSenderVideoTest, PopulateGenericFrameDescriptor) {
   generic.dependencies.push_back(kFrameId - 500);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   RtpGenericFrameDescriptor descriptor_wire;
   EXPECT_EQ(1, transport_.packets_sent());
@@ -861,7 +861,7 @@ void RtpSenderVideoTest::
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, VideoCodecType::kVideoCodecVP8,
                                kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);
   // Expect only minimal 1-byte vp8 descriptor was generated.
@@ -898,7 +898,7 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) {
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;
   EXPECT_TRUE(
@@ -908,7 +908,7 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) {
 
   // Next key frame also have the allocation.
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(
       transport_.last_sent_packet()
           .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
@@ -935,21 +935,21 @@
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // No allocation sent on delta frame unless it has been updated.
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(transport_.last_sent_packet()
                    .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // Update the allocation.
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;
   EXPECT_TRUE(
@@ -984,7 +984,7 @@ TEST_F(RtpSenderVideoTest,
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());
 
@@ -998,7 +998,7 @@ TEST_F(RtpSenderVideoTest,
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;
   EXPECT_TRUE(
@@ -1031,7 +1031,7 @@ TEST_F(RtpSenderVideoTest,
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());
 
@@ -1040,7 +1040,7 @@ TEST_F(RtpSenderVideoTest,
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;
   EXPECT_TRUE(
@@ -1073,7 +1073,7 @@ TEST_F(RtpSenderVideoTest,
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());
 
@@ -1082,7 +1082,7 @@ TEST_F(RtpSenderVideoTest,
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;
   EXPECT_TRUE(
@@ -1110,7 +1110,7 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) {
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;
   EXPECT_TRUE(
@@ -1120,14 +1120,14 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) {
 
   // VideoLayersAllocation not sent on the next delta frame.
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(transport_.last_sent_packet()
                    .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // Update allocation. VideoLayesAllocation should be sent on the next frame.
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(
       transport_.last_sent_packet()
           .GetExtension<RtpVideoLayersAllocationExtension>(&sent_allocation));
@@ -1157,14 +1157,14 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationNotSentOnHigherTemporalLayers) {
 
   vp8_header.temporalIdx = 1;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(transport_.last_sent_packet()
                    .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // Send a delta frame on tl0.
   vp8_header.temporalIdx = 0;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());
 }
@@ -1179,7 +1179,7 @@ TEST_F(RtpSenderVideoTest, AbsoluteCaptureTime) {
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
                                kAbsoluteCaptureTimestampMs, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   absl::optional<AbsoluteCaptureTime> absolute_capture_time;
 
@@ -1214,7 +1214,7 @@ TEST_F(RtpSenderVideoTest,
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
                                /*capture_time_ms=*/0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   // No absolute capture time should be set as the capture_time_ms was the
   // default value.
   for (const RtpPacketReceived& packet : transport_.sent_packets()) {
@@ -1238,7 +1238,7 @@ TEST_F(RtpSenderVideoTest, AbsoluteCaptureTimeWithCaptureClockOffset) {
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
                                kAbsoluteCaptureTimestampMs, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   absl::optional<AbsoluteCaptureTime> absolute_capture_time;
 
@@ -1276,7 +1276,7 @@ TEST_F(RtpSenderVideoTest, AbsoluteCaptureTimeWithExtensionProvided) {
   hdr.absolute_capture_time = kAbsoluteCaptureTime;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
                                /*capture_time_ms=*/789, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
 
   absl::optional<AbsoluteCaptureTime> absolute_capture_time;
 
@@ -1311,7 +1311,7 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
   vp8_header.temporalIdx = 0;
 
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(
       transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
 
@@ -1320,7 +1320,7 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   vp8_header.temporalIdx = 1;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   VideoPlayoutDelay received_delay = VideoPlayoutDelay();
   ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
       &received_delay));
@@ -1331,7 +1331,7 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
   hdr.playout_delay = VideoPlayoutDelay();  // Indicates "no change".
   vp8_header.temporalIdx = 0;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
       &received_delay));
   EXPECT_EQ(received_delay, kExpectedDelay);
@@ -1339,14 +1339,14 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
   // The next frame does not need the extensions since it's delivery has
   // already been guaranteed.
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(
       transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
 
   // Insert key-frame, we need to refresh the state here.
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs);
+                               kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
       &received_delay));
   EXPECT_EQ(received_delay, kExpectedDelay);
@@ -1362,7 +1362,7 @@ TEST_F(RtpSenderVideoTest, SendGenericVideo) {
   video_header.frame_type = VideoFrameType::kVideoFrameKey;
   ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234,
                                            4321, kPayload, video_header,
-                                           absl::nullopt));
+                                           absl::nullopt, {}));
 
   rtc::ArrayView<const uint8_t> sent_payload =
       transport_.last_sent_packet().payload();
@@ -1376,7 +1376,7 @@ TEST_F(RtpSenderVideoTest, SendGenericVideo) {
   video_header.frame_type = VideoFrameType::kVideoFrameDelta;
   ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234,
                                            4321, kDeltaPayload, video_header,
-                                           absl::nullopt));
+                                           absl::nullopt, {}));
 
   sent_payload = sent_payload = transport_.last_sent_packet().payload();
   generic_header = sent_payload[0];
@@ -1394,7 +1394,7 @@ TEST_F(RtpSenderVideoTest, SendRawVideo) {
   video_header.frame_type = VideoFrameType::kVideoFrameKey;
   ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, absl::nullopt, 1234,
                                            4321, kPayload, video_header,
-                                           absl::nullopt));
+                                           absl::nullopt, {}));
 
   rtc::ArrayView<const uint8_t> sent_payload =
       transport_.last_sent_packet().payload();