Compensate encoder bitrate for transformer added payload.

Bug: webrtc:15092
Change-Id: I7b4eff6f3f32ba0ae33ba8e4fc3c40425868719c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/301500
Reviewed-by: Tony Herre <herre@google.com>
Commit-Queue: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#39967}
This commit is contained in:
parent 096427e494
commit f5bbb2940e
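In short: with a frame transformer (encoded insertable streams), the payload handed to the packetizer can be larger than what the encoder produced, and that extra payload was previously not charged against the bitrate allocation. The CL below generalizes the old "packetization overhead" statistic into a "post encode overhead" (packetized payload bytes minus encoder output bytes) and subtracts it from the encoder target. A rough standalone sketch of the tracking idea, using simplified stand-in types rather than the actual WebRTC classes:

// Illustrative sketch only; the class and member names are simplified
// stand-ins for what RTPSenderVideo does in this CL, not the real API.
#include <cstddef>
#include <cstdint>

class PostEncodeOverheadTracker {
 public:
  // Called once per frame after packetization. Everything beyond the encoder
  // output (payload headers, bytes appended by a frame transformer) counts
  // as post-encode overhead.
  void OnFrameSent(size_t packetized_payload_bytes,
                   size_t encoder_output_bytes) {
    if (packetized_payload_bytes >= encoder_output_bytes)
      overhead_bytes_in_window_ += packetized_payload_bytes - encoder_output_bytes;
  }

  // Overhead rate over the (assumed one-second) measurement window; the real
  // implementation uses a sliding-window RateStatistics object instead.
  uint32_t OverheadBps() const {
    return static_cast<uint32_t>(overhead_bytes_in_window_ * 8);
  }

 private:
  uint64_t overhead_bytes_in_window_ = 0;
};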
@@ -696,15 +696,15 @@ bool RtpVideoSender::NackEnabled() const {
   return nack_enabled;
 }
 
-uint32_t RtpVideoSender::GetPacketizationOverheadRate() const {
-  uint32_t packetization_overhead_bps = 0;
+DataRate RtpVideoSender::GetPostEncodeOverhead() const {
+  DataRate post_encode_overhead = DataRate::Zero();
   for (size_t i = 0; i < rtp_streams_.size(); ++i) {
     if (rtp_streams_[i].rtp_rtcp->SendingMedia()) {
-      packetization_overhead_bps +=
-          rtp_streams_[i].sender_video->PacketizationOverheadBps();
+      post_encode_overhead +=
+          rtp_streams_[i].sender_video->PostEncodeOverhead();
     }
   }
-  return packetization_overhead_bps;
+  return post_encode_overhead;
 }
 
 void RtpVideoSender::DeliverRtcp(const uint8_t* packet, size_t length) {

@@ -863,13 +863,13 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
     // since `fec_allowed_` may be toggled back on at any moment.
   }
 
-  // Subtract packetization overhead from the encoder target. If target rate
+  // Subtract post encode overhead from the encoder target. If target rate
   // is really low, cap the overhead at 50%. This also avoids the case where
   // `encoder_target_rate_bps_` is 0 due to encoder pause event while the
   // packetization rate is positive since packets are still flowing.
-  uint32_t packetization_rate_bps =
-      std::min(GetPacketizationOverheadRate(), encoder_target_rate_bps_ / 2);
-  encoder_target_rate_bps_ -= packetization_rate_bps;
+  uint32_t post_encode_overhead_bps = std::min(
+      GetPostEncodeOverhead().bps<uint32_t>(), encoder_target_rate_bps_ / 2);
+  encoder_target_rate_bps_ -= post_encode_overhead_bps;
 
   loss_mask_vector_.clear();
 

@@ -889,7 +889,7 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
   }
   const uint32_t media_rate = encoder_target_rate_bps_ +
                               encoder_overhead_rate_bps +
-                              packetization_rate_bps;
+                              post_encode_overhead_bps;
   RTC_DCHECK_GE(update.target_bitrate, DataRate::BitsPerSec(media_rate));
   // `protection_bitrate_bps_` includes overhead.
   protection_bitrate_bps_ = update.target_bitrate.bps() - media_rate;

@@ -161,7 +161,7 @@ class RtpVideoSender : public RtpVideoSenderInterface,
   void ConfigureProtection();
   void ConfigureSsrcs(const std::map<uint32_t, RtpState>& suspended_ssrcs);
   bool NackEnabled() const;
-  uint32_t GetPacketizationOverheadRate() const;
+  DataRate GetPostEncodeOverhead() const;
   DataRate CalculateOverheadRate(DataRate data_rate,
                                  DataSize packet_size,
                                  DataSize overhead_per_packet,
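A quick worked example of the std::min(overhead, target / 2) cap above (the numbers are assumptions for illustration, not taken from the CL): with a 300 kbps encoder target and 20 kbps of measured post-encode overhead the encoder is set to 280 kbps, while 200 kbps of overhead would be clamped to 150 kbps, so a very low or paused target is never driven to zero.

// Worked example of the capped subtraction (values are assumptions).
#include <algorithm>
#include <cassert>
#include <cstdint>

uint32_t CappedOverheadBps(uint32_t overhead_bps, uint32_t encoder_target_bps) {
  return std::min(overhead_bps, encoder_target_bps / 2);
}

int main() {
  // Normal case: overhead is small, so it is subtracted in full.
  assert(CappedOverheadBps(20'000, 300'000) == 20'000);    // target becomes 280 kbps
  // Pathological case: overhead is huge, so only half the target is removed.
  assert(CappedOverheadBps(200'000, 300'000) == 150'000);  // target becomes 150 kbps
  // Encoder paused: target 0, nothing is subtracted even though packets flow.
  assert(CappedOverheadBps(5'000, 0) == 0);
  return 0;
}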
@@ -210,7 +210,8 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
     video_header.frame_type = VideoFrameType::kVideoFrameDelta;
     EXPECT_TRUE(rtp_sender_video_->SendVideo(
         kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
-        timestamp / 90, payload_data, video_header, 0, {}));
+        timestamp / 90, payload_data, sizeof(payload_data), video_header, 0,
+        {}));
     // Min required delay until retransmit = 5 + RTT ms (RTT = 0).
     fake_clock.AdvanceTimeMilliseconds(5);
     int length = BuildNackList(nack_list);

@@ -260,7 +261,8 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) {
     video_header.frame_type = VideoFrameType::kVideoFrameDelta;
     EXPECT_TRUE(rtp_sender_video_->SendVideo(
         kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
-        timestamp / 90, payload_data, video_header, 0, {}));
+        timestamp / 90, payload_data, sizeof(payload_data), video_header, 0,
+        {}));
     // Prepare next frame.
     timestamp += 3000;
     fake_clock.AdvanceTimeMilliseconds(33);

@@ -358,7 +358,7 @@ class RtpRtcpImpl2Test : public ::testing::Test {
 
     success &= sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
                                  rtp_timestamp, capture_time_ms, payload,
-                                 rtp_video_header, 0, {});
+                                 sizeof(payload), rtp_video_header, 0, {});
     return success;
   }
 

@@ -228,7 +228,8 @@ class RtpRtcpImplTest : public ::testing::Test {
     const uint8_t payload[100] = {0};
     EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, kPayloadType, true));
     EXPECT_TRUE(sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8,
-                                  0, 0, payload, rtp_video_header, 0, {}));
+                                  0, 0, payload, sizeof(payload),
+                                  rtp_video_header, 0, {}));
   }
 
 void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {

@@ -1346,7 +1346,8 @@ TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) {
     EXPECT_TRUE(rtp_sender_video.SendVideo(
         kPayloadType, kCodecType,
         capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
-        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs, {}));
+        kPayloadData, sizeof(kPayloadData), video_header,
+        kDefaultExpectedRetransmissionTimeMs, {}));
 
     time_controller_.AdvanceTime(TimeDelta::Millis(33));
   }

@@ -1362,7 +1363,8 @@ TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) {
     EXPECT_TRUE(rtp_sender_video.SendVideo(
         kPayloadType, kCodecType,
         capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
-        kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs, {}));
+        kPayloadData, sizeof(kPayloadData), video_header,
+        kDefaultExpectedRetransmissionTimeMs, {}));
 
     time_controller_.AdvanceTime(TimeDelta::Millis(33));
   }
@@ -155,7 +155,7 @@ RTPSenderVideo::RTPSenderVideo(const Config& config)
       red_payload_type_(config.red_payload_type),
       fec_type_(config.fec_type),
       fec_overhead_bytes_(config.fec_overhead_bytes),
-      packetization_overhead_bitrate_(1000, RateStatistics::kBpsScale),
+      post_encode_overhead_bitrate_(1000, RateStatistics::kBpsScale),
      frame_encryptor_(config.frame_encryptor),
      require_frame_encryption_(config.require_frame_encryption),
      generic_descriptor_auth_experiment_(!absl::StartsWith(

@@ -182,7 +182,7 @@ RTPSenderVideo::~RTPSenderVideo() {
 
 void RTPSenderVideo::LogAndSendToNetwork(
     std::vector<std::unique_ptr<RtpPacketToSend>> packets,
-    size_t unpacketized_payload_size) {
+    size_t encoder_output_size) {
   {
     MutexLock lock(&stats_mutex_);
     size_t packetized_payload_size = 0;

@@ -193,9 +193,9 @@ void RTPSenderVideo::LogAndSendToNetwork(
     }
     // AV1 and H264 packetizers may produce less packetized bytes than
    // unpacketized.
-    if (packetized_payload_size >= unpacketized_payload_size) {
-      packetization_overhead_bitrate_.Update(
-          packetized_payload_size - unpacketized_payload_size,
+    if (packetized_payload_size >= encoder_output_size) {
+      post_encode_overhead_bitrate_.Update(
+          packetized_payload_size - encoder_output_size,
           clock_->TimeInMilliseconds());
     }
   }

@@ -471,7 +471,8 @@ bool RTPSenderVideo::SendVideo(
     RTPVideoHeader video_header,
     absl::optional<int64_t> expected_retransmission_time_ms) {
   return SendVideo(payload_type, codec_type, rtp_timestamp, capture_time_ms,
-                   payload, video_header, expected_retransmission_time_ms,
+                   payload, payload.size(), video_header,
+                   expected_retransmission_time_ms,
                    /*csrcs=*/{});
 }
 

@@ -481,6 +482,7 @@ bool RTPSenderVideo::SendVideo(
     uint32_t rtp_timestamp,
     int64_t capture_time_ms,
     rtc::ArrayView<const uint8_t> payload,
+    size_t encoder_output_size,
     RTPVideoHeader video_header,
     absl::optional<int64_t> expected_retransmission_time_ms,
     std::vector<uint32_t> csrcs) {

@@ -745,7 +747,7 @@ bool RTPSenderVideo::SendVideo(
     }
   }
 
-  LogAndSendToNetwork(std::move(rtp_packets), payload.size());
+  LogAndSendToNetwork(std::move(rtp_packets), encoder_output_size);
 
   // Update details about the last sent frame.
   last_rotation_ = video_header.rotation;

@@ -788,14 +790,16 @@ bool RTPSenderVideo::SendEncodedImage(
         expected_retransmission_time_ms);
   }
   return SendVideo(payload_type, codec_type, rtp_timestamp,
-                   encoded_image.capture_time_ms_, encoded_image, video_header,
+                   encoded_image.capture_time_ms_, encoded_image,
+                   encoded_image.size(), video_header,
                    expected_retransmission_time_ms, rtp_sender_->Csrcs());
 }
 
-uint32_t RTPSenderVideo::PacketizationOverheadBps() const {
+DataRate RTPSenderVideo::PostEncodeOverhead() const {
   MutexLock lock(&stats_mutex_);
-  return packetization_overhead_bitrate_.Rate(clock_->TimeInMilliseconds())
-      .value_or(0);
+  return DataRate::BitsPerSec(
+      post_encode_overhead_bitrate_.Rate(clock_->TimeInMilliseconds())
+          .value_or(0));
 }
 
 bool RTPSenderVideo::AllowRetransmission(
@@ -99,11 +99,14 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface {
                  rtc::ArrayView<const uint8_t> payload,
                  RTPVideoHeader video_header,
                  absl::optional<int64_t> expected_retransmission_time_ms);
+  // `encoder_output_size` is the size of the video frame as it came out of the
+  // video encoder, excluding any additional overhead.
   bool SendVideo(int payload_type,
                  absl::optional<VideoCodecType> codec_type,
                  uint32_t rtp_timestamp,
                  int64_t capture_time_ms,
                  rtc::ArrayView<const uint8_t> payload,
+                 size_t encoder_output_size,
                  RTPVideoHeader video_header,
                  absl::optional<int64_t> expected_retransmission_time_ms,
                  std::vector<uint32_t> csrcs) override;

@@ -138,12 +141,13 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface {
   void SetVideoLayersAllocationAfterTransformation(
       VideoLayersAllocation allocation) override;
 
-  // Returns the current packetization overhead rate, in bps. Note that this is
-  // the payload overhead, eg the VP8 payload headers, not the RTP headers
-  // or extension/
+  // Returns the current post encode overhead rate, in bps. Note that this is
+  // the payload overhead, eg the VP8 payload headers and any other added
+  // metadata added by transforms. It does not include the RTP headers or
+  // extensions.
   // TODO(sprang): Consider moving this to RtpSenderEgress so it's in the same
   // place as the other rate stats.
-  uint32_t PacketizationOverheadBps() const;
+  DataRate PostEncodeOverhead() const;
 
  protected:
  static uint8_t GetTemporalId(const RTPVideoHeader& header);

@@ -183,7 +187,7 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface {
 
   void LogAndSendToNetwork(
       std::vector<std::unique_ptr<RtpPacketToSend>> packets,
-      size_t unpacketized_payload_size);
+      size_t encoder_output_size);
 
   bool red_enabled() const { return red_payload_type_.has_value(); }
 

@@ -231,7 +235,7 @@ class RTPSenderVideo : public RTPVideoFrameSenderInterface {
   const size_t fec_overhead_bytes_;  // Per packet max FEC overhead.
 
   mutable Mutex stats_mutex_;
-  RateStatistics packetization_overhead_bitrate_ RTC_GUARDED_BY(stats_mutex_);
+  RateStatistics post_encode_overhead_bitrate_ RTC_GUARDED_BY(stats_mutex_);
 
   std::map<int, TemporalLayerStats> frame_stats_by_temporal_layer_
       RTC_GUARDED_BY(stats_mutex_);
@@ -33,6 +33,7 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
       uint32_t ssrc,
       std::vector<uint32_t> csrcs)
       : encoded_data_(encoded_image.GetEncodedData()),
+        pre_transform_payload_size_(encoded_image.size()),
         header_(video_header),
         frame_type_(encoded_image._frameType),
         payload_type_(payload_type),

@@ -58,6 +59,10 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
     encoded_data_ = EncodedImageBuffer::Create(data.data(), data.size());
   }
 
+  size_t GetPreTransformPayloadSize() const {
+    return pre_transform_payload_size_;
+  }
+
   uint32_t GetTimestamp() const override { return timestamp_; }
   uint32_t GetSsrc() const override { return ssrc_; }
 

@@ -94,6 +99,7 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
 
  private:
   rtc::scoped_refptr<EncodedImageBufferInterface> encoded_data_;
+  const size_t pre_transform_payload_size_;
   RTPVideoHeader header_;
   const VideoFrameType frame_type_;
   const uint8_t payload_type_;

@@ -171,6 +177,7 @@ void RTPSenderVideoFrameTransformerDelegate::SendVideo(
         transformed_video_frame->GetTimestamp(),
         transformed_video_frame->GetCaptureTimeMs(),
         transformed_video_frame->GetData(),
+        transformed_video_frame->GetPreTransformPayloadSize(),
         transformed_video_frame->GetHeader(),
         transformed_video_frame->GetExpectedRetransmissionTimeMs(),
         transformed_video_frame->Metadata().GetCsrcs());

@@ -182,6 +189,7 @@ void RTPSenderVideoFrameTransformerDelegate::SendVideo(
         transformed_video_frame->GetPayloadType(), metadata.GetCodec(),
         transformed_video_frame->GetTimestamp(),
         /*capture_time_ms=*/0, transformed_video_frame->GetData(),
+        transformed_video_frame->GetData().size(),
         RTPVideoHeader::FromMetadata(metadata),
         /*expected_retransmission_time_ms_=*/absl::nullopt,
         metadata.GetCsrcs());
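The delegate changes above are what make transform-added bytes visible as overhead: the frame remembers its size as it left the encoder, and once the transform has run, SendVideo is called with the (possibly larger) transformed payload plus that original size. A simplified sketch of that flow, with assumed stand-in types rather than the actual WebRTC frame-transformer interfaces:

// Simplified stand-ins for illustration only.
#include <cstddef>
#include <cstdint>
#include <vector>

struct TransformableFrame {
  std::vector<uint8_t> data;      // payload that will be packetized
  size_t pre_transform_size = 0;  // size captured when the frame was created
};

// A transform that appends 16 bytes of metadata to every frame.
void AppendMetadata(TransformableFrame& frame) {
  frame.data.insert(frame.data.end(), 16, 0xAB);
}

// On the send path, the packetizer sees frame.data.size() bytes while the
// encoder only produced pre_transform_size bytes; the difference is reported
// as post-encode overhead and subtracted from the encoder bitrate target.
size_t PostEncodeOverheadBytes(const TransformableFrame& frame) {
  return frame.data.size() - frame.pre_transform_size;
}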
@@ -34,6 +34,7 @@ class RTPVideoFrameSenderInterface {
                          uint32_t rtp_timestamp,
                          int64_t capture_time_ms,
                          rtc::ArrayView<const uint8_t> payload,
+                         size_t encoder_output_size,
                          RTPVideoHeader video_header,
                          absl::optional<int64_t> expected_retransmission_time_ms,
                          std::vector<uint32_t> csrcs) = 0;

@@ -37,6 +37,7 @@ class MockRTPVideoFrameSenderInterface : public RTPVideoFrameSenderInterface {
               uint32_t rtp_timestamp,
               int64_t capture_time_ms,
               rtc::ArrayView<const uint8_t> payload,
+              size_t encoder_output_size,
               RTPVideoHeader video_header,
               absl::optional<int64_t> expected_retransmission_time_ms,
               std::vector<uint32_t> csrcs),

@@ -256,7 +257,7 @@ TEST_F(RtpSenderVideoFrameTransformerDelegateTest,
   rtc::Event event;
   EXPECT_CALL(test_sender_,
               SendVideo(payload_type, absl::make_optional(kVideoCodecVP8),
-                        timestamp, /*capture_time_ms=*/0, buffer, _,
+                        timestamp, /*capture_time_ms=*/0, buffer, _, _,
                         /*expected_retransmission_time_ms_=*/
                         (absl::optional<int64_t>)absl::nullopt, frame_csrcs))
       .WillOnce(WithoutArgs([&] {
@@ -201,7 +201,8 @@ TEST_F(RtpSenderVideoTest, KeyFrameHasCVO) {
   RTPVideoHeader hdr;
   hdr.rotation = kVideoRotation_0;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoRotation rotation;

@@ -228,7 +229,7 @@ TEST_F(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) {
   fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp,
-                               kFrame, hdr,
+                               kFrame, sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   VideoSendTiming timing;
   EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(

@@ -246,14 +247,14 @@ TEST_F(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
   RTPVideoHeader hdr;
   hdr.rotation = kVideoRotation_90;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  EXPECT_TRUE(
-      rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  EXPECT_TRUE(rtp_sender_video_->SendVideo(
+      kPayload, kType, kTimestamp, 0, kFrame, sizeof(kFrame), hdr,
       kDefaultExpectedRetransmissionTimeMs, {}));
 
   hdr.rotation = kVideoRotation_0;
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   EXPECT_TRUE(rtp_sender_video_->SendVideo(
-      kPayload, kType, kTimestamp + 1, 0, kFrame, hdr,
+      kPayload, kType, kTimestamp + 1, 0, kFrame, sizeof(kFrame), hdr,
       kDefaultExpectedRetransmissionTimeMs, {}));
 
   VideoRotation rotation;

@@ -270,13 +271,13 @@ TEST_F(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
   RTPVideoHeader hdr;
   hdr.rotation = kVideoRotation_90;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  EXPECT_TRUE(
-      rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  EXPECT_TRUE(rtp_sender_video_->SendVideo(
+      kPayload, kType, kTimestamp, 0, kFrame, sizeof(kFrame), hdr,
      kDefaultExpectedRetransmissionTimeMs, {}));
 
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   EXPECT_TRUE(rtp_sender_video_->SendVideo(
-      kPayload, kType, kTimestamp + 1, 0, kFrame, hdr,
+      kPayload, kType, kTimestamp + 1, 0, kFrame, sizeof(kFrame), hdr,
      kDefaultExpectedRetransmissionTimeMs, {}));
 
   VideoRotation rotation;
@@ -517,7 +518,7 @@ TEST_F(RtpSenderVideoTest,
   uint8_t kPayload[kMaxPacketSize] = {};
   EXPECT_TRUE(rtp_sender_video_->SendVideo(
       kMediaPayloadId, /*codec_type=*/kVideoCodecVP8, /*rtp_timestamp=*/0,
-      /*capture_time_ms=*/1'000, kPayload, header,
+      /*capture_time_ms=*/1'000, kPayload, sizeof(kPayload), header,
      /*expected_retransmission_time_ms=*/absl::nullopt, /*csrcs=*/{}));
   ASSERT_THAT(transport_.sent_packets(), Not(IsEmpty()));
   // Ack media ssrc, but not rtx ssrc.

@@ -536,7 +537,7 @@ TEST_F(RtpSenderVideoTest,
 
   EXPECT_TRUE(rtp_sender_video_->SendVideo(
       kMediaPayloadId, /*codec_type=*/kVideoCodecVP8, /*rtp_timestamp=*/0,
-      /*capture_time_ms=*/1'000, payload, header,
+      /*capture_time_ms=*/1'000, payload, frame_size, header,
      /*expected_retransmission_time_ms=*/1'000, /*csrcs=*/{}));
   const RtpPacketReceived& media_packet = transport_.last_sent_packet();
   EXPECT_EQ(media_packet.Ssrc(), kSsrc);

@@ -575,7 +576,8 @@ TEST_F(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
   generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
                                        DecodeTargetIndication::kSwitch};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);

@@ -601,7 +603,8 @@ TEST_F(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
   generic.decode_target_indications = {DecodeTargetIndication::kNotPresent,
                                        DecodeTargetIndication::kRequired};
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   EXPECT_EQ(transport_.packets_sent(), 2);

@@ -650,7 +653,8 @@ TEST_F(RtpSenderVideoTest,
   generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
                                        DecodeTargetIndication::kSwitch};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);

@@ -666,7 +670,8 @@ TEST_F(RtpSenderVideoTest,
   generic.decode_target_indications = {DecodeTargetIndication::kNotPresent,
                                        DecodeTargetIndication::kRequired};
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   EXPECT_EQ(transport_.packets_sent(), 2);

@@ -695,7 +700,8 @@ TEST_F(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) {
                                        DecodeTargetIndication::kSwitch};
   generic.chain_diffs = {2};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);

@@ -730,7 +736,8 @@ TEST_F(RtpSenderVideoTest,
   generic.active_decode_targets = 0b01;
   generic.chain_diffs = {1};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);

@@ -768,7 +775,8 @@ TEST_F(RtpSenderVideoTest,
                                        DecodeTargetIndication::kSwitch};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SetVideoStructure(&video_structure1);
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   // Parse 1st extension.
   ASSERT_EQ(transport_.packets_sent(), 1);

@@ -784,7 +792,8 @@ TEST_F(RtpSenderVideoTest,
   generic.decode_target_indications = {DecodeTargetIndication::kDiscardable,
                                        DecodeTargetIndication::kNotPresent};
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 2);

@@ -796,7 +805,8 @@ TEST_F(RtpSenderVideoTest,
                                        DecodeTargetIndication::kSwitch};
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SetVideoStructure(&video_structure2);
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   // Parse the 2nd key frame.
   ASSERT_EQ(transport_.packets_sent(), 3);

@@ -850,7 +860,8 @@ TEST_F(RtpSenderVideoTest,
 
   EXPECT_CALL(*encryptor,
               Encrypt(_, _, Not(IsEmpty()), ElementsAreArray(kFrame), _, _));
-  rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                             sizeof(kFrame), hdr,
                              kDefaultExpectedRetransmissionTimeMs, {});
   // Double check packet with the dependency descriptor is sent.
   ASSERT_EQ(transport_.packets_sent(), 1);

@@ -872,7 +883,8 @@ TEST_F(RtpSenderVideoTest, PopulateGenericFrameDescriptor) {
   generic.dependencies.push_back(kFrameId - 1);
   generic.dependencies.push_back(kFrameId - 500);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   RtpGenericFrameDescriptor descriptor_wire;

@@ -907,7 +919,7 @@ void RtpSenderVideoTest::
   generic.frame_id = kFrameId;
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   rtp_sender_video_->SendVideo(kPayload, VideoCodecType::kVideoCodecVP8,
-                               kTimestamp, 0, kFrame, hdr,
+                               kTimestamp, 0, kFrame, sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   ASSERT_EQ(transport_.packets_sent(), 1);
@@ -944,7 +956,8 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) {
 
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;

@@ -954,7 +967,8 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) {
   EXPECT_THAT(sent_allocation.active_spatial_layers, ElementsAre(layer));
 
   // Next key frame also have the allocation.
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(
       transport_.last_sent_packet()

@@ -981,21 +995,24 @@ TEST_F(RtpSenderVideoTest,
 
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // No allocation sent on delta frame unless it has been updated.
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(transport_.last_sent_packet()
                    .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // Update the allocation.
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;

@@ -1030,7 +1047,8 @@ TEST_F(RtpSenderVideoTest,
 
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());

@@ -1044,7 +1062,8 @@ TEST_F(RtpSenderVideoTest,
   allocation.active_spatial_layers.push_back(layer);
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;

@@ -1077,7 +1096,8 @@ TEST_F(RtpSenderVideoTest,
 
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());

@@ -1086,7 +1106,8 @@ TEST_F(RtpSenderVideoTest,
   allocation.active_spatial_layers[0].frame_rate_fps = 20;
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;

@@ -1119,7 +1140,8 @@ TEST_F(RtpSenderVideoTest,
 
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());

@@ -1128,7 +1150,8 @@ TEST_F(RtpSenderVideoTest,
   allocation.active_spatial_layers[0].frame_rate_fps = 9;
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;

@@ -1156,7 +1179,8 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) {
 
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
 
   VideoLayersAllocation sent_allocation;

@@ -1166,14 +1190,16 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) {
   EXPECT_THAT(sent_allocation.active_spatial_layers, SizeIs(1));
 
   // VideoLayersAllocation not sent on the next delta frame.
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(transport_.last_sent_packet()
                    .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // Update allocation. VideoLayesAllocation should be sent on the next frame.
   rtp_sender_video_->SetVideoLayersAllocation(allocation);
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(
       transport_.last_sent_packet()
@@ -1203,14 +1229,16 @@ TEST_F(RtpSenderVideoTest, VideoLayersAllocationNotSentOnHigherTemporalLayers) {
   auto& vp8_header = hdr.video_type_header.emplace<RTPVideoHeaderVP8>();
   vp8_header.temporalIdx = 1;
 
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(transport_.last_sent_packet()
                    .HasExtension<RtpVideoLayersAllocationExtension>());
 
   // Send a delta frame on tl0.
   vp8_header.temporalIdx = 0;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_TRUE(transport_.last_sent_packet()
                   .HasExtension<RtpVideoLayersAllocationExtension>());

@@ -1225,8 +1253,8 @@ TEST_F(RtpSenderVideoTest,
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
-                               /*capture_time_ms=*/0, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs, {});
+                               /*capture_time_ms=*/0, kFrame, sizeof(kFrame),
+                               hdr, kDefaultExpectedRetransmissionTimeMs, {});
   // No absolute capture time should be set as the capture_time_ms was the
   // default value.
   for (const RtpPacketReceived& packet : transport_.sent_packets()) {

@@ -1245,9 +1273,9 @@ TEST_F(RtpSenderVideoTest, AbsoluteCaptureTime) {
 
   RTPVideoHeader hdr;
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
-                               kAbsoluteCaptureTimestampMs, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs, {});
+  rtp_sender_video_->SendVideo(
+      kPayload, kType, kTimestamp, kAbsoluteCaptureTimestampMs, kFrame,
+      sizeof(kFrame), hdr, kDefaultExpectedRetransmissionTimeMs, {});
 
   absl::optional<AbsoluteCaptureTime> absolute_capture_time;
 

@@ -1284,8 +1312,8 @@ TEST_F(RtpSenderVideoTest, AbsoluteCaptureTimeWithExtensionProvided) {
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
   hdr.absolute_capture_time = kAbsoluteCaptureTime;
   rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
-                               /*capture_time_ms=*/789, kFrame, hdr,
-                               kDefaultExpectedRetransmissionTimeMs, {});
+                               /*capture_time_ms=*/789, kFrame, sizeof(kFrame),
+                               hdr, kDefaultExpectedRetransmissionTimeMs, {});
 
   absl::optional<AbsoluteCaptureTime> absolute_capture_time;
 

@@ -1319,7 +1347,8 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
   auto& vp8_header = hdr.video_type_header.emplace<RTPVideoHeaderVP8>();
   vp8_header.temporalIdx = 0;
 
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(
       transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());

@@ -1328,7 +1357,8 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
   hdr.playout_delay = kExpectedDelay;
   hdr.frame_type = VideoFrameType::kVideoFrameDelta;
   vp8_header.temporalIdx = 1;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   VideoPlayoutDelay received_delay = VideoPlayoutDelay();
   ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(

@@ -1339,7 +1369,8 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
   // be populated since dilvery wasn't guaranteed on the last one.
   hdr.playout_delay = VideoPlayoutDelay();  // Indicates "no change".
   vp8_header.temporalIdx = 0;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
       &received_delay));

@@ -1347,14 +1378,16 @@ TEST_F(RtpSenderVideoTest, PopulatesPlayoutDelay) {
 
   // The next frame does not need the extensions since it's delivery has
   // already been guaranteed.
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   EXPECT_FALSE(
       transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
 
   // Insert key-frame, we need to refresh the state here.
   hdr.frame_type = VideoFrameType::kVideoFrameKey;
-  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+  rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame,
+                               sizeof(kFrame), hdr,
                                kDefaultExpectedRetransmissionTimeMs, {});
   ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
       &received_delay));

@@ -1370,8 +1403,8 @@ TEST_F(RtpSenderVideoTest, SendGenericVideo) {
   RTPVideoHeader video_header;
   video_header.frame_type = VideoFrameType::kVideoFrameKey;
   ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321,
-                                           kPayload, video_header,
-                                           absl::nullopt, {}));
+                                           kPayload, sizeof(kPayload),
+                                           video_header, absl::nullopt, {}));
 
   rtc::ArrayView<const uint8_t> sent_payload =
       transport_.last_sent_packet().payload();

@@ -1384,8 +1417,8 @@ TEST_F(RtpSenderVideoTest, SendGenericVideo) {
   const uint8_t kDeltaPayload[] = {13, 42, 32, 93, 13};
   video_header.frame_type = VideoFrameType::kVideoFrameDelta;
   ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321,
-                                           kDeltaPayload, video_header,
-                                           absl::nullopt, {}));
+                                           kDeltaPayload, sizeof(kDeltaPayload),
+                                           video_header, absl::nullopt, {}));
 
   sent_payload = sent_payload = transport_.last_sent_packet().payload();
   generic_header = sent_payload[0];

@@ -1402,8 +1435,8 @@ TEST_F(RtpSenderVideoTest, SendRawVideo) {
   RTPVideoHeader video_header;
   video_header.frame_type = VideoFrameType::kVideoFrameKey;
   ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, absl::nullopt, 1234,
-                                           4321, kPayload, video_header,
-                                           absl::nullopt, {}));
+                                           4321, kPayload, sizeof(kPayload),
+                                           video_header, absl::nullopt, {}));
 
   rtc::ArrayView<const uint8_t> sent_payload =
       transport_.last_sent_packet().payload();
@@ -1553,6 +1586,43 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest, OnTransformedFrameSendsVideo) {
   EXPECT_EQ(transport_.packets_sent(), 2);
 }
 
+TEST_F(RtpSenderVideoWithFrameTransformerTest,
+       TransformOverheadCorrectlyAccountedFor) {
+  auto mock_frame_transformer =
+      rtc::make_ref_counted<NiceMock<MockFrameTransformer>>();
+  rtc::scoped_refptr<TransformedFrameCallback> callback;
+  EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameSinkCallback)
+      .WillOnce(SaveArg<0>(&callback));
+  std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+      CreateSenderWithFrameTransformer(mock_frame_transformer);
+  ASSERT_TRUE(callback);
+
+  auto encoded_image = CreateDefaultEncodedImage();
+  RTPVideoHeader video_header;
+  video_header.frame_type = VideoFrameType::kVideoFrameKey;
+  ON_CALL(*mock_frame_transformer, Transform)
+      .WillByDefault(
+          [&callback](std::unique_ptr<TransformableFrameInterface> frame) {
+            const uint8_t data[] = {1, 2,  3,  4,  5,  6,  7,  8,
+                                    9, 10, 11, 12, 13, 14, 15, 16};
+            frame->SetData(data);
+            callback->OnTransformedFrame(std::move(frame));
+          });
+  auto encoder_queue = time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+      "encoder_queue", TaskQueueFactory::Priority::NORMAL);
+  const int kFramesPerSecond = 25;
+  for (int i = 0; i < kFramesPerSecond; ++i) {
+    encoder_queue->PostTask([&] {
+      rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp,
+                                         *encoded_image, video_header,
+                                         kDefaultExpectedRetransmissionTimeMs);
+    });
+    time_controller_.AdvanceTime(TimeDelta::Millis(1000 / kFramesPerSecond));
+  }
+  EXPECT_EQ(transport_.packets_sent(), kFramesPerSecond);
+  EXPECT_GT(rtp_sender_video->PostEncodeOverhead().bps(), 2200);
+}
+
 TEST_F(RtpSenderVideoWithFrameTransformerTest,
        TransformableFrameMetadataHasCorrectValue) {
   auto mock_frame_transformer =