diff --git a/call/rtp_video_sender.cc b/call/rtp_video_sender.cc
index 708aa81f4e..b793b24f10 100644
--- a/call/rtp_video_sender.cc
+++ b/call/rtp_video_sender.cc
@@ -696,15 +696,15 @@ bool RtpVideoSender::NackEnabled() const {
   return nack_enabled;
 }
 
-uint32_t RtpVideoSender::GetPacketizationOverheadRate() const {
-  uint32_t packetization_overhead_bps = 0;
+DataRate RtpVideoSender::GetPostEncodeOverhead() const {
+  DataRate post_encode_overhead = DataRate::Zero();
   for (size_t i = 0; i < rtp_streams_.size(); ++i) {
     if (rtp_streams_[i].rtp_rtcp->SendingMedia()) {
-      packetization_overhead_bps +=
-          rtp_streams_[i].sender_video->PacketizationOverheadBps();
+      post_encode_overhead +=
+          rtp_streams_[i].sender_video->PostEncodeOverhead();
     }
   }
-  return packetization_overhead_bps;
+  return post_encode_overhead;
 }
 
 void RtpVideoSender::DeliverRtcp(const uint8_t* packet, size_t length) {
@@ -863,13 +863,13 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
     // since `fec_allowed_` may be toggled back on at any moment.
   }
 
-  // Subtract packetization overhead from the encoder target. If target rate
+  // Subtract post encode overhead from the encoder target. If target rate
   // is really low, cap the overhead at 50%. This also avoids the case where
   // `encoder_target_rate_bps_` is 0 due to encoder pause event while the
   // packetization rate is positive since packets are still flowing.
-  uint32_t packetization_rate_bps =
-      std::min(GetPacketizationOverheadRate(), encoder_target_rate_bps_ / 2);
-  encoder_target_rate_bps_ -= packetization_rate_bps;
+  uint32_t post_encode_overhead_bps = std::min(
+      GetPostEncodeOverhead().bps(), encoder_target_rate_bps_ / 2);
+  encoder_target_rate_bps_ -= post_encode_overhead_bps;
 
   loss_mask_vector_.clear();
 
@@ -889,7 +889,7 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
   }
   const uint32_t media_rate = encoder_target_rate_bps_ +
                               encoder_overhead_rate_bps +
-                              packetization_rate_bps;
+                              post_encode_overhead_bps;
   RTC_DCHECK_GE(update.target_bitrate, DataRate::BitsPerSec(media_rate));
   // `protection_bitrate_bps_` includes overhead.
   protection_bitrate_bps_ = update.target_bitrate.bps() - media_rate;
diff --git a/call/rtp_video_sender.h b/call/rtp_video_sender.h
index 9666b89916..3d0dbc1db6 100644
--- a/call/rtp_video_sender.h
+++ b/call/rtp_video_sender.h
@@ -161,7 +161,7 @@ class RtpVideoSender : public RtpVideoSenderInterface,
   void ConfigureProtection();
   void ConfigureSsrcs(const std::map& suspended_ssrcs);
   bool NackEnabled() const;
-  uint32_t GetPacketizationOverheadRate() const;
+  DataRate GetPostEncodeOverhead() const;
   DataRate CalculateOverheadRate(DataRate data_rate,
                                  DataSize packet_size,
                                  DataSize overhead_per_packet,
diff --git a/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index db830a9a2e..6697aff66c 100644
--- a/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -210,7 +210,8 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
     video_header.frame_type = VideoFrameType::kVideoFrameDelta;
     EXPECT_TRUE(rtp_sender_video_->SendVideo(
         kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
-        timestamp / 90, payload_data, video_header, 0, {}));
+        timestamp / 90, payload_data, sizeof(payload_data), video_header, 0,
+        {}));
     // Min required delay until retransmit = 5 + RTT ms (RTT = 0).
     fake_clock.AdvanceTimeMilliseconds(5);
     int length = BuildNackList(nack_list);
@@ -260,7 +261,8 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) {
     video_header.frame_type = VideoFrameType::kVideoFrameDelta;
     EXPECT_TRUE(rtp_sender_video_->SendVideo(
         kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
-        timestamp / 90, payload_data, video_header, 0, {}));
+        timestamp / 90, payload_data, sizeof(payload_data), video_header, 0,
+        {}));
     // Prepare next frame.
     timestamp += 3000;
     fake_clock.AdvanceTimeMilliseconds(33);
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
index cd7e8cc033..2b81aa34b7 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
@@ -358,7 +358,7 @@ class RtpRtcpImpl2Test : public ::testing::Test {
 
     success &= sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
                                  rtp_timestamp, capture_time_ms, payload,
-                                 rtp_video_header, 0, {});
+                                 sizeof(payload), rtp_video_header, 0, {});
     return success;
   }
 
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 9f3dd37d86..2e0ce76388 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -228,7 +228,8 @@ class RtpRtcpImplTest : public ::testing::Test {
     const uint8_t payload[100] = {0};
     EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, kPayloadType, true));
     EXPECT_TRUE(sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
-                                  0, 0, payload, rtp_video_header, 0, {}));
+                                  0, 0, payload, sizeof(payload),
+                                  rtp_video_header, 0, {}));
   }
 
   void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 70cd3382e3..a8c2831b35 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -1346,7 +1346,8 @@ TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) {
     EXPECT_TRUE(rtp_sender_video.SendVideo(
         kPayloadType, kCodecType,
         capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
-        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs, {}));
+        kPayloadData, sizeof(kPayloadData), video_header,
+        kDefaultExpectedRetransmissionTimeMs, {}));
     time_controller_.AdvanceTime(TimeDelta::Millis(33));
   }
 
@@ -1362,7 +1363,8 @@ TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) {
     EXPECT_TRUE(rtp_sender_video.SendVideo(
         kPayloadType, kCodecType,
         capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
-        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs, {}));
+        kPayloadData, sizeof(kPayloadData), video_header,
+        kDefaultExpectedRetransmissionTimeMs, {}));
     time_controller_.AdvanceTime(TimeDelta::Millis(33));
   }
 
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index c863db4ccf..3d0b348fa7 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -155,7 +155,7 @@ RTPSenderVideo::RTPSenderVideo(const Config& config)
       red_payload_type_(config.red_payload_type),
      fec_type_(config.fec_type),
       fec_overhead_bytes_(config.fec_overhead_bytes),
-      packetization_overhead_bitrate_(1000, RateStatistics::kBpsScale),
+      post_encode_overhead_bitrate_(1000, RateStatistics::kBpsScale),
       frame_encryptor_(config.frame_encryptor),
       require_frame_encryption_(config.require_frame_encryption),
       generic_descriptor_auth_experiment_(!absl::StartsWith(
@@ -182,7 +182,7 @@ RTPSenderVideo::~RTPSenderVideo() {
 
 void RTPSenderVideo::LogAndSendToNetwork(
     std::vector> packets,
-    size_t unpacketized_payload_size) {
+    size_t encoder_output_size) {
   {
     MutexLock lock(&stats_mutex_);
     size_t packetized_payload_size = 0;
@@ -193,9 +193,9 @@ void RTPSenderVideo::LogAndSendToNetwork(
     }
     // AV1 and H264 packetizers may produce less packetized bytes than
     // unpacketized.
-    if (packetized_payload_size >= unpacketized_payload_size) {
-      packetization_overhead_bitrate_.Update(
-          packetized_payload_size - unpacketized_payload_size,
+    if (packetized_payload_size >= encoder_output_size) {
+      post_encode_overhead_bitrate_.Update(
+          packetized_payload_size - encoder_output_size,
           clock_->TimeInMilliseconds());
     }
   }
@@ -471,7 +471,8 @@ bool RTPSenderVideo::SendVideo(
     RTPVideoHeader video_header,
     absl::optional expected_retransmission_time_ms) {
   return SendVideo(payload_type, codec_type, rtp_timestamp, capture_time_ms,
-                   payload, video_header, expected_retransmission_time_ms,
+                   payload, payload.size(), video_header,
+                   expected_retransmission_time_ms,
                    /*csrcs=*/{});
 }
 
@@ -481,6 +482,7 @@ bool RTPSenderVideo::SendVideo(
    uint32_t rtp_timestamp,
     int64_t capture_time_ms,
     rtc::ArrayView payload,
+    size_t encoder_output_size,
     RTPVideoHeader video_header,
     absl::optional expected_retransmission_time_ms,
     std::vector csrcs) {
@@ -745,7 +747,7 @@ bool RTPSenderVideo::SendVideo(
     }
   }
 
-  LogAndSendToNetwork(std::move(rtp_packets), payload.size());
+  LogAndSendToNetwork(std::move(rtp_packets), encoder_output_size);
 
   // Update details about the last sent frame.
   last_rotation_ = video_header.rotation;
@@ -788,14 +790,16 @@ bool RTPSenderVideo::SendEncodedImage(
         expected_retransmission_time_ms);
   }
   return SendVideo(payload_type, codec_type, rtp_timestamp,
-                   encoded_image.capture_time_ms_, encoded_image, video_header,
+                   encoded_image.capture_time_ms_, encoded_image,
+                   encoded_image.size(), video_header,
                    expected_retransmission_time_ms, rtp_sender_->Csrcs());
 }
 
-uint32_t RTPSenderVideo::PacketizationOverheadBps() const {
+DataRate RTPSenderVideo::PostEncodeOverhead() const {
   MutexLock lock(&stats_mutex_);
-  return packetization_overhead_bitrate_.Rate(clock_->TimeInMilliseconds())
-      .value_or(0);
+  return DataRate::BitsPerSec(
+      post_encode_overhead_bitrate_.Rate(clock_->TimeInMilliseconds())
+          .value_or(0));
 }
 
 bool RTPSenderVideo::AllowRetransmission(
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h
index a1388a8b6d..9f74d15d8a 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -99,11 +99,14 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface {
                  rtc::ArrayView payload,
                  RTPVideoHeader video_header,
                  absl::optional expected_retransmission_time_ms);
+  // `encoder_output_size` is the size of the video frame as it came out of the
+  // video encoder, excluding any additional overhead.
bool SendVideo(int payload_type, absl::optional codec_type, uint32_t rtp_timestamp, int64_t capture_time_ms, rtc::ArrayView payload, + size_t encoder_output_size, RTPVideoHeader video_header, absl::optional expected_retransmission_time_ms, std::vector csrcs) override; @@ -138,12 +141,13 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface { void SetVideoLayersAllocationAfterTransformation( VideoLayersAllocation allocation) override; - // Returns the current packetization overhead rate, in bps. Note that this is - // the payload overhead, eg the VP8 payload headers, not the RTP headers - // or extension/ + // Returns the current post encode overhead rate, in bps. Note that this is + // the payload overhead, eg the VP8 payload headers and any other added + // metadata added by transforms. It does not include the RTP headers or + // extensions. // TODO(sprang): Consider moving this to RtpSenderEgress so it's in the same // place as the other rate stats. - uint32_t PacketizationOverheadBps() const; + DataRate PostEncodeOverhead() const; protected: static uint8_t GetTemporalId(const RTPVideoHeader& header); @@ -183,7 +187,7 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface { void LogAndSendToNetwork( std::vector> packets, - size_t unpacketized_payload_size); + size_t encoder_output_size); bool red_enabled() const { return red_payload_type_.has_value(); } @@ -231,7 +235,7 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface { const size_t fec_overhead_bytes_; // Per packet max FEC overhead. mutable Mutex stats_mutex_; - RateStatistics packetization_overhead_bitrate_ RTC_GUARDED_BY(stats_mutex_); + RateStatistics post_encode_overhead_bitrate_ RTC_GUARDED_BY(stats_mutex_); std::map frame_stats_by_temporal_layer_ RTC_GUARDED_BY(stats_mutex_); diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc index 7dfd7ca4ad..29c33f7b0e 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc @@ -33,6 +33,7 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { uint32_t ssrc, std::vector csrcs) : encoded_data_(encoded_image.GetEncodedData()), + pre_transform_payload_size_(encoded_image.size()), header_(video_header), frame_type_(encoded_image._frameType), payload_type_(payload_type), @@ -58,6 +59,10 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { encoded_data_ = EncodedImageBuffer::Create(data.data(), data.size()); } + size_t GetPreTransformPayloadSize() const { + return pre_transform_payload_size_; + } + uint32_t GetTimestamp() const override { return timestamp_; } uint32_t GetSsrc() const override { return ssrc_; } @@ -94,6 +99,7 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { private: rtc::scoped_refptr encoded_data_; + const size_t pre_transform_payload_size_; RTPVideoHeader header_; const VideoFrameType frame_type_; const uint8_t payload_type_; @@ -171,6 +177,7 @@ void RTPSenderVideoFrameTransformerDelegate::SendVideo( transformed_video_frame->GetTimestamp(), transformed_video_frame->GetCaptureTimeMs(), transformed_video_frame->GetData(), + transformed_video_frame->GetPreTransformPayloadSize(), transformed_video_frame->GetHeader(), transformed_video_frame->GetExpectedRetransmissionTimeMs(), transformed_video_frame->Metadata().GetCsrcs()); @@ -182,6 +189,7 @@ void 
RTPSenderVideoFrameTransformerDelegate::SendVideo( transformed_video_frame->GetPayloadType(), metadata.GetCodec(), transformed_video_frame->GetTimestamp(), /*capture_time_ms=*/0, transformed_video_frame->GetData(), + transformed_video_frame->GetData().size(), RTPVideoHeader::FromMetadata(metadata), /*expected_retransmission_time_ms_=*/absl::nullopt, metadata.GetCsrcs()); diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h index a397041811..ac934eb332 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h +++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h @@ -34,6 +34,7 @@ class RTPVideoFrameSenderInterface { uint32_t rtp_timestamp, int64_t capture_time_ms, rtc::ArrayView payload, + size_t encoder_output_size, RTPVideoHeader video_header, absl::optional expected_retransmission_time_ms, std::vector csrcs) = 0; diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc index a3cd81e0d1..51de035ea5 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc @@ -37,6 +37,7 @@ class MockRTPVideoFrameSenderInterface : public RTPVideoFrameSenderInterface { uint32_t rtp_timestamp, int64_t capture_time_ms, rtc::ArrayView payload, + size_t encoder_output_size, RTPVideoHeader video_header, absl::optional expected_retransmission_time_ms, std::vector csrcs), @@ -256,7 +257,7 @@ TEST_F(RtpSenderVideoFrameTransformerDelegateTest, rtc::Event event; EXPECT_CALL(test_sender_, SendVideo(payload_type, absl::make_optional(kVideoCodecVP8), - timestamp, /*capture_time_ms=*/0, buffer, _, + timestamp, /*capture_time_ms=*/0, buffer, _, _, /*expected_retransmission_time_ms_=*/ (absl::optional)absl::nullopt, frame_csrcs)) .WillOnce(WithoutArgs([&] { diff --git a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc index 683d082099..932d87c579 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc @@ -201,7 +201,8 @@ TEST_F(RtpSenderVideoTest, KeyFrameHasCVO) { RTPVideoHeader hdr; hdr.rotation = kVideoRotation_0; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoRotation rotation; @@ -228,7 +229,7 @@ TEST_F(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) { fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs); hdr.frame_type = VideoFrameType::kVideoFrameKey; rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp, - kFrame, hdr, + kFrame, sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoSendTiming timing; EXPECT_TRUE(transport_.last_sent_packet().GetExtension( @@ -246,14 +247,14 @@ TEST_F(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) { RTPVideoHeader hdr; hdr.rotation = kVideoRotation_90; hdr.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE( - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, - kDefaultExpectedRetransmissionTimeMs, {})); + EXPECT_TRUE(rtp_sender_video_->SendVideo( + kPayload, kType, 
kTimestamp, 0, kFrame, sizeof(kFrame), hdr, + kDefaultExpectedRetransmissionTimeMs, {})); hdr.rotation = kVideoRotation_0; hdr.frame_type = VideoFrameType::kVideoFrameDelta; EXPECT_TRUE(rtp_sender_video_->SendVideo( - kPayload, kType, kTimestamp + 1, 0, kFrame, hdr, + kPayload, kType, kTimestamp + 1, 0, kFrame, sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {})); VideoRotation rotation; @@ -270,13 +271,13 @@ TEST_F(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) { RTPVideoHeader hdr; hdr.rotation = kVideoRotation_90; hdr.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE( - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, - kDefaultExpectedRetransmissionTimeMs, {})); + EXPECT_TRUE(rtp_sender_video_->SendVideo( + kPayload, kType, kTimestamp, 0, kFrame, sizeof(kFrame), hdr, + kDefaultExpectedRetransmissionTimeMs, {})); hdr.frame_type = VideoFrameType::kVideoFrameDelta; EXPECT_TRUE(rtp_sender_video_->SendVideo( - kPayload, kType, kTimestamp + 1, 0, kFrame, hdr, + kPayload, kType, kTimestamp + 1, 0, kFrame, sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {})); VideoRotation rotation; @@ -517,7 +518,7 @@ TEST_F(RtpSenderVideoTest, uint8_t kPayload[kMaxPacketSize] = {}; EXPECT_TRUE(rtp_sender_video_->SendVideo( kMediaPayloadId, /*codec_type=*/kVideoCodecVP8, /*rtp_timestamp=*/0, - /*capture_time_ms=*/1'000, kPayload, header, + /*capture_time_ms=*/1'000, kPayload, sizeof(kPayload), header, /*expected_retransmission_time_ms=*/absl::nullopt, /*csrcs=*/{})); ASSERT_THAT(transport_.sent_packets(), Not(IsEmpty())); // Ack media ssrc, but not rtx ssrc. @@ -536,7 +537,7 @@ TEST_F(RtpSenderVideoTest, EXPECT_TRUE(rtp_sender_video_->SendVideo( kMediaPayloadId, /*codec_type=*/kVideoCodecVP8, /*rtp_timestamp=*/0, - /*capture_time_ms=*/1'000, payload, header, + /*capture_time_ms=*/1'000, payload, frame_size, header, /*expected_retransmission_time_ms=*/1'000, /*csrcs=*/{})); const RtpPacketReceived& media_packet = transport_.last_sent_packet(); EXPECT_EQ(media_packet.Ssrc(), kSsrc); @@ -575,7 +576,8 @@ TEST_F(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) { generic.decode_target_indications = {DecodeTargetIndication::kSwitch, DecodeTargetIndication::kSwitch}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_EQ(transport_.packets_sent(), 1); @@ -601,7 +603,8 @@ TEST_F(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) { generic.decode_target_indications = {DecodeTargetIndication::kNotPresent, DecodeTargetIndication::kRequired}; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_EQ(transport_.packets_sent(), 2); @@ -650,7 +653,8 @@ TEST_F(RtpSenderVideoTest, generic.decode_target_indications = {DecodeTargetIndication::kSwitch, DecodeTargetIndication::kSwitch}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_EQ(transport_.packets_sent(), 1); @@ -666,7 +670,8 @@ 
TEST_F(RtpSenderVideoTest, generic.decode_target_indications = {DecodeTargetIndication::kNotPresent, DecodeTargetIndication::kRequired}; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_EQ(transport_.packets_sent(), 2); @@ -695,7 +700,8 @@ TEST_F(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) { DecodeTargetIndication::kSwitch}; generic.chain_diffs = {2}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_EQ(transport_.packets_sent(), 1); @@ -730,7 +736,8 @@ TEST_F(RtpSenderVideoTest, generic.active_decode_targets = 0b01; generic.chain_diffs = {1}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_EQ(transport_.packets_sent(), 1); @@ -768,7 +775,8 @@ TEST_F(RtpSenderVideoTest, DecodeTargetIndication::kSwitch}; hdr.frame_type = VideoFrameType::kVideoFrameKey; rtp_sender_video_->SetVideoStructure(&video_structure1); - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); // Parse 1st extension. ASSERT_EQ(transport_.packets_sent(), 1); @@ -784,7 +792,8 @@ TEST_F(RtpSenderVideoTest, generic.decode_target_indications = {DecodeTargetIndication::kDiscardable, DecodeTargetIndication::kNotPresent}; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_EQ(transport_.packets_sent(), 2); @@ -796,7 +805,8 @@ TEST_F(RtpSenderVideoTest, DecodeTargetIndication::kSwitch}; hdr.frame_type = VideoFrameType::kVideoFrameKey; rtp_sender_video_->SetVideoStructure(&video_structure2); - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); // Parse the 2nd key frame. ASSERT_EQ(transport_.packets_sent(), 3); @@ -850,7 +860,8 @@ TEST_F(RtpSenderVideoTest, EXPECT_CALL(*encryptor, Encrypt(_, _, Not(IsEmpty()), ElementsAreArray(kFrame), _, _)); - rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); // Double check packet with the dependency descriptor is sent. 
ASSERT_EQ(transport_.packets_sent(), 1); @@ -872,7 +883,8 @@ TEST_F(RtpSenderVideoTest, PopulateGenericFrameDescriptor) { generic.dependencies.push_back(kFrameId - 1); generic.dependencies.push_back(kFrameId - 500); hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); RtpGenericFrameDescriptor descriptor_wire; @@ -907,7 +919,7 @@ void RtpSenderVideoTest:: generic.frame_id = kFrameId; hdr.frame_type = VideoFrameType::kVideoFrameDelta; rtp_sender_video_->SendVideo(kPayload, VideoCodecType::kVideoCodecVP8, - kTimestamp, 0, kFrame, hdr, + kTimestamp, 0, kFrame, sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_EQ(transport_.packets_sent(), 1); @@ -944,7 +956,8 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) { RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoLayersAllocation sent_allocation; @@ -954,7 +967,8 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) { EXPECT_THAT(sent_allocation.active_spatial_layers, ElementsAre(layer)); // Next key frame also have the allocation. - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_TRUE( transport_.last_sent_packet() @@ -981,21 +995,24 @@ TEST_F(RtpSenderVideoTest, RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_TRUE(transport_.last_sent_packet() .HasExtension()); // No allocation sent on delta frame unless it has been updated. hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_FALSE(transport_.last_sent_packet() .HasExtension()); // Update the allocation. 
rtp_sender_video_->SetVideoLayersAllocation(allocation); - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoLayersAllocation sent_allocation; @@ -1030,7 +1047,8 @@ TEST_F(RtpSenderVideoTest, RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_TRUE(transport_.last_sent_packet() .HasExtension()); @@ -1044,7 +1062,8 @@ TEST_F(RtpSenderVideoTest, allocation.active_spatial_layers.push_back(layer); rtp_sender_video_->SetVideoLayersAllocation(allocation); hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoLayersAllocation sent_allocation; @@ -1077,7 +1096,8 @@ TEST_F(RtpSenderVideoTest, RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_TRUE(transport_.last_sent_packet() .HasExtension()); @@ -1086,7 +1106,8 @@ TEST_F(RtpSenderVideoTest, allocation.active_spatial_layers[0].frame_rate_fps = 20; rtp_sender_video_->SetVideoLayersAllocation(allocation); hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoLayersAllocation sent_allocation; @@ -1119,7 +1140,8 @@ TEST_F(RtpSenderVideoTest, RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_TRUE(transport_.last_sent_packet() .HasExtension()); @@ -1128,7 +1150,8 @@ TEST_F(RtpSenderVideoTest, allocation.active_spatial_layers[0].frame_rate_fps = 9; rtp_sender_video_->SetVideoLayersAllocation(allocation); hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoLayersAllocation sent_allocation; @@ -1156,7 +1179,8 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) { RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoLayersAllocation sent_allocation; @@ -1166,14 +1190,16 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) { EXPECT_THAT(sent_allocation.active_spatial_layers, SizeIs(1)); // VideoLayersAllocation not sent on the next delta frame. 
- rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_FALSE(transport_.last_sent_packet() .HasExtension()); // Update allocation. VideoLayesAllocation should be sent on the next frame. rtp_sender_video_->SetVideoLayersAllocation(allocation); - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_TRUE( transport_.last_sent_packet() @@ -1203,14 +1229,16 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationNotSentOnHigherTemporalLayers) { auto& vp8_header = hdr.video_type_header.emplace(); vp8_header.temporalIdx = 1; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_FALSE(transport_.last_sent_packet() .HasExtension()); // Send a delta frame on tl0. vp8_header.temporalIdx = 0; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_TRUE(transport_.last_sent_packet() .HasExtension()); @@ -1225,8 +1253,8 @@ TEST_F(RtpSenderVideoTest, RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, - /*capture_time_ms=*/0, kFrame, hdr, - kDefaultExpectedRetransmissionTimeMs, {}); + /*capture_time_ms=*/0, kFrame, sizeof(kFrame), + hdr, kDefaultExpectedRetransmissionTimeMs, {}); // No absolute capture time should be set as the capture_time_ms was the // default value. 
for (const RtpPacketReceived& packet : transport_.sent_packets()) { @@ -1245,9 +1273,9 @@ TEST_F(RtpSenderVideoTest, AbsoluteCaptureTime) { RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, - kAbsoluteCaptureTimestampMs, kFrame, hdr, - kDefaultExpectedRetransmissionTimeMs, {}); + rtp_sender_video_->SendVideo( + kPayload, kType, kTimestamp, kAbsoluteCaptureTimestampMs, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); absl::optional absolute_capture_time; @@ -1284,8 +1312,8 @@ TEST_F(RtpSenderVideoTest, AbsoluteCaptureTimeWithExtensionProvided) { hdr.frame_type = VideoFrameType::kVideoFrameKey; hdr.absolute_capture_time = kAbsoluteCaptureTime; rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, - /*capture_time_ms=*/789, kFrame, hdr, - kDefaultExpectedRetransmissionTimeMs, {}); + /*capture_time_ms=*/789, kFrame, sizeof(kFrame), + hdr, kDefaultExpectedRetransmissionTimeMs, {}); absl::optional absolute_capture_time; @@ -1319,7 +1347,8 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) { auto& vp8_header = hdr.video_type_header.emplace(); vp8_header.temporalIdx = 0; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_FALSE( transport_.last_sent_packet().HasExtension()); @@ -1328,7 +1357,8 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) { hdr.playout_delay = kExpectedDelay; hdr.frame_type = VideoFrameType::kVideoFrameDelta; vp8_header.temporalIdx = 1; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); VideoPlayoutDelay received_delay = VideoPlayoutDelay(); ASSERT_TRUE(transport_.last_sent_packet().GetExtension( @@ -1339,7 +1369,8 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) { // be populated since dilvery wasn't guaranteed on the last one. hdr.playout_delay = VideoPlayoutDelay(); // Indicates "no change". vp8_header.temporalIdx = 0; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_TRUE(transport_.last_sent_packet().GetExtension( &received_delay)); @@ -1347,14 +1378,16 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) { // The next frame does not need the extensions since it's delivery has // already been guaranteed. - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); EXPECT_FALSE( transport_.last_sent_packet().HasExtension()); // Insert key-frame, we need to refresh the state here. 
hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, + sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {}); ASSERT_TRUE(transport_.last_sent_packet().GetExtension( &received_delay)); @@ -1370,8 +1403,8 @@ TEST_F(RtpSenderVideoTest, SendGenericVideo) { RTPVideoHeader video_header; video_header.frame_type = VideoFrameType::kVideoFrameKey; ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321, - kPayload, video_header, - absl::nullopt, {})); + kPayload, sizeof(kPayload), + video_header, absl::nullopt, {})); rtc::ArrayView sent_payload = transport_.last_sent_packet().payload(); @@ -1384,8 +1417,8 @@ TEST_F(RtpSenderVideoTest, SendGenericVideo) { const uint8_t kDeltaPayload[] = {13, 42, 32, 93, 13}; video_header.frame_type = VideoFrameType::kVideoFrameDelta; ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321, - kDeltaPayload, video_header, - absl::nullopt, {})); + kDeltaPayload, sizeof(kDeltaPayload), + video_header, absl::nullopt, {})); sent_payload = sent_payload = transport_.last_sent_packet().payload(); generic_header = sent_payload[0]; @@ -1402,8 +1435,8 @@ TEST_F(RtpSenderVideoTest, SendRawVideo) { RTPVideoHeader video_header; video_header.frame_type = VideoFrameType::kVideoFrameKey; ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, absl::nullopt, 1234, - 4321, kPayload, video_header, - absl::nullopt, {})); + 4321, kPayload, sizeof(kPayload), + video_header, absl::nullopt, {})); rtc::ArrayView sent_payload = transport_.last_sent_packet().payload(); @@ -1553,6 +1586,43 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest, OnTransformedFrameSendsVideo) { EXPECT_EQ(transport_.packets_sent(), 2); } +TEST_F(RtpSenderVideoWithFrameTransformerTest, + TransformOverheadCorrectlyAccountedFor) { + auto mock_frame_transformer = + rtc::make_ref_counted>(); + rtc::scoped_refptr callback; + EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameSinkCallback) + .WillOnce(SaveArg<0>(&callback)); + std::unique_ptr rtp_sender_video = + CreateSenderWithFrameTransformer(mock_frame_transformer); + ASSERT_TRUE(callback); + + auto encoded_image = CreateDefaultEncodedImage(); + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + ON_CALL(*mock_frame_transformer, Transform) + .WillByDefault( + [&callback](std::unique_ptr frame) { + const uint8_t data[] = {1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16}; + frame->SetData(data); + callback->OnTransformedFrame(std::move(frame)); + }); + auto encoder_queue = time_controller_.GetTaskQueueFactory()->CreateTaskQueue( + "encoder_queue", TaskQueueFactory::Priority::NORMAL); + const int kFramesPerSecond = 25; + for (int i = 0; i < kFramesPerSecond; ++i) { + encoder_queue->PostTask([&] { + rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp, + *encoded_image, video_header, + kDefaultExpectedRetransmissionTimeMs); + }); + time_controller_.AdvanceTime(TimeDelta::Millis(1000 / kFramesPerSecond)); + } + EXPECT_EQ(transport_.packets_sent(), kFramesPerSecond); + EXPECT_GT(rtp_sender_video->PostEncodeOverhead().bps(), 2200); +} + TEST_F(RtpSenderVideoWithFrameTransformerTest, TransformableFrameMetadataHasCorrectValue) { auto mock_frame_transformer =