Stash and retry packets that are waiting for the dependency descriptor template structure.

Bug: b/317178411
Change-Id: Idf4d0eb9740753ba587ec81c1071cb25fb42c36d
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/334646
Auto-Submit: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Per Kjellander <perkj@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#41554}
This commit is contained in:
philipel 2024-01-17 19:26:58 +01:00 committed by WebRTC LUCI CQ
parent 787c8f8845
commit 7aff4d1a40
6 changed files with 245 additions and 63 deletions

View File

@ -78,6 +78,27 @@ struct FrameDependencyStructure {
std::vector<FrameDependencyTemplate> templates;
};
// Holds only the mandatory (fixed, 3-byte) part of an RTP Dependency
// Descriptor: the frame number, the template id, and the first/last packet
// in frame flags. Parsing just this part does not require the sender's
// FrameDependencyStructure, unlike the full DependencyDescriptor.
class DependencyDescriptorMandatory {
 public:
  void set_frame_number(int frame_number) { frame_number_ = frame_number; }
  int frame_number() const { return frame_number_; }

  void set_template_id(int template_id) { template_id_ = template_id; }
  int template_id() const { return template_id_; }

  void set_first_packet_in_frame(bool first) { first_packet_in_frame_ = first; }
  bool first_packet_in_frame() const { return first_packet_in_frame_; }

  void set_last_packet_in_frame(bool last) { last_packet_in_frame_ = last; }
  bool last_packet_in_frame() const { return last_packet_in_frame_; }

 private:
  // Brace-initialize all members: the original left them indeterminate, so
  // calling a getter before the matching setter read uninitialized memory.
  int frame_number_ = 0;
  int template_id_ = 0;
  bool first_packet_in_frame_ = false;
  bool last_packet_in_frame_ = false;
};
struct DependencyDescriptor {
static constexpr int kMaxSpatialIds = 4;
static constexpr int kMaxTemporalIds = 8;

View File

@ -52,4 +52,17 @@ bool RtpDependencyDescriptorExtension::Write(
return writer.Write();
}
// Parses only the 3-byte mandatory part of the dependency descriptor.
// Returns false when `data` is too short to contain it.
bool RtpDependencyDescriptorExtensionMandatory::Parse(
    rtc::ArrayView<const uint8_t> data,
    DependencyDescriptorMandatory* descriptor) {
  // The mandatory part is always exactly 3 bytes.
  if (data.size() < 3) {
    return false;
  }
  // First byte layout: S (start of frame), E (end of frame), 6-bit template id.
  const uint8_t flags_and_template = data[0];
  descriptor->set_first_packet_in_frame((flags_and_template & 0b1000'0000) !=
                                        0);
  descriptor->set_last_packet_in_frame((flags_and_template & 0b0100'0000) != 0);
  descriptor->set_template_id(flags_and_template & 0b0011'1111);
  // Frame number is a 16-bit big-endian value in the next two bytes.
  descriptor->set_frame_number((uint16_t{data[1]} << 8) | data[2]);
  return true;
}
} // namespace webrtc

View File

@ -54,6 +54,16 @@ class RtpDependencyDescriptorExtension {
static constexpr std::bitset<32> kAllChainsAreActive = ~uint32_t{0};
};
// Trait to only read the mandatory part of the descriptor.
// Unlike the full RtpDependencyDescriptorExtension, parsing the mandatory
// part does not require the sender's template structure, so it can be used
// on packets that arrive before that structure is known.
class RtpDependencyDescriptorExtensionMandatory {
public:
// Shares the extension id with the full extension: both read the same RTP
// header extension bytes.
static constexpr webrtc::RTPExtensionType kId =
webrtc::RtpDependencyDescriptorExtension::kId;
// Parses the 3-byte mandatory part into `descriptor`; returns false if
// `data` is too short.
static bool Parse(rtc::ArrayView<const uint8_t> data,
DependencyDescriptorMandatory* descriptor);
};
} // namespace webrtc
#endif // MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_EXTENSION_H_

View File

@ -428,23 +428,21 @@ RtpVideoStreamReceiver2::ParseGenericDependenciesExtension(
const RtpPacketReceived& rtp_packet,
RTPVideoHeader* video_header) {
RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
if (rtp_packet.HasExtension<RtpDependencyDescriptorExtension>()) {
webrtc::DependencyDescriptor dependency_descriptor;
if (DependencyDescriptorMandatory dd_mandatory;
rtp_packet.GetExtension<RtpDependencyDescriptorExtensionMandatory>(
&dd_mandatory)) {
const int64_t frame_id =
frame_id_unwrapper_.Unwrap(dd_mandatory.frame_number());
DependencyDescriptor dependency_descriptor;
if (!rtp_packet.GetExtension<RtpDependencyDescriptorExtension>(
video_structure_.get(), &dependency_descriptor)) {
// Descriptor is there, but failed to parse. Either it is invalid,
// or too old packet (after relevant video_structure_ changed),
// or too new packet (before relevant video_structure_ arrived).
// Drop such packet to be on the safe side.
// TODO(bugs.webrtc.org/10342): Stash too new packet.
Timestamp now = clock_->CurrentTime();
if (now - last_logged_failed_to_parse_dd_ > TimeDelta::Seconds(1)) {
last_logged_failed_to_parse_dd_ = now;
RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
<< " Failed to parse dependency descriptor.";
if (!video_structure_frame_id_ || frame_id < video_structure_frame_id_) {
return kDropPacket;
} else {
return kStashPacket;
}
return kDropPacket;
}
if (dependency_descriptor.attached_structure != nullptr &&
!dependency_descriptor.first_packet_in_frame) {
RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
@ -457,8 +455,6 @@ RtpVideoStreamReceiver2::ParseGenericDependenciesExtension(
video_header->is_last_packet_in_frame =
dependency_descriptor.last_packet_in_frame;
int64_t frame_id =
frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number);
auto& generic_descriptor_info = video_header->generic.emplace();
generic_descriptor_info.frame_id = frame_id;
generic_descriptor_info.spatial_index =
@ -533,10 +529,11 @@ RtpVideoStreamReceiver2::ParseGenericDependenciesExtension(
return kHasGenericDescriptor;
}
void RtpVideoStreamReceiver2::OnReceivedPayloadData(
bool RtpVideoStreamReceiver2::OnReceivedPayloadData(
rtc::CopyOnWriteBuffer codec_payload,
const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video) {
const RTPVideoHeader& video,
int times_nacked) {
RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
auto packet =
@ -589,16 +586,23 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData(
video_header.playout_delay = rtp_packet.GetExtension<PlayoutDelayLimits>();
}
ParseGenericDependenciesResult generic_descriptor_state =
ParseGenericDependenciesExtension(rtp_packet, &video_header);
if (!rtp_packet.recovered()) {
UpdatePacketReceiveTimestamps(
rtp_packet, video_header.frame_type == VideoFrameType::kVideoFrameKey);
}
if (generic_descriptor_state == kDropPacket) {
ParseGenericDependenciesResult generic_descriptor_state =
ParseGenericDependenciesExtension(rtp_packet, &video_header);
if (generic_descriptor_state == kStashPacket) {
return true;
} else if (generic_descriptor_state == kDropPacket) {
Timestamp now = clock_->CurrentTime();
if (now - last_logged_failed_to_parse_dd_ > TimeDelta::Seconds(1)) {
last_logged_failed_to_parse_dd_ = now;
RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
<< " Failed to parse dependency descriptor.";
}
if (video_structure_ == nullptr &&
next_keyframe_request_for_missing_video_structure_ < now) {
// No video structure received yet, most likely part of the initial
@ -607,7 +611,7 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData(
next_keyframe_request_for_missing_video_structure_ =
now + TimeDelta::Seconds(1);
}
return;
return false;
}
// Color space should only be transmitted in the last packet of a frame,
@ -653,17 +657,12 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData(
}
}
if (nack_module_) {
packet->times_nacked = nack_module_->OnReceivedPacket(
rtp_packet.SequenceNumber(), rtp_packet.recovered());
} else {
packet->times_nacked = -1;
}
packet->times_nacked = times_nacked;
if (codec_payload.size() == 0) {
NotifyReceiverOfEmptyPacket(packet->seq_num);
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
return;
return false;
}
if (packet->codec() == kVideoCodecH264) {
@ -686,7 +685,7 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData(
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
[[fallthrough]];
case video_coding::H264SpsPpsTracker::kDrop:
return;
return false;
case video_coding::H264SpsPpsTracker::kInsert:
packet->video_payload = std::move(fixed.bitstream);
break;
@ -699,6 +698,7 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData(
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
frame_counter_.Add(packet->timestamp);
OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
return false;
}
void RtpVideoStreamReceiver2::OnRecoveredPacket(
@ -1085,15 +1085,51 @@ void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) {
if (type_it == payload_type_map_.end()) {
return;
}
absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
type_it->second->Parse(packet.PayloadBuffer());
if (parsed_payload == absl::nullopt) {
RTC_LOG(LS_WARNING) << "Failed parsing payload.";
return;
}
OnReceivedPayloadData(std::move(parsed_payload->video_payload), packet,
parsed_payload->video_header);
auto parse_and_insert = [&](const RtpPacketReceived& packet) {
RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
type_it->second->Parse(packet.PayloadBuffer());
if (parsed_payload == absl::nullopt) {
RTC_LOG(LS_WARNING) << "Failed parsing payload.";
return false;
}
int times_nacked = nack_module_
? nack_module_->OnReceivedPacket(
packet.SequenceNumber(), packet.recovered())
: -1;
return OnReceivedPayloadData(std::move(parsed_payload->video_payload),
packet, parsed_payload->video_header,
times_nacked);
};
// When the dependency descriptor is used and the descriptor fails to parse,
// `OnReceivedPayloadData` may return true to signal that the packet
// should be retried at a later stage, which is why it is stashed here.
//
// TODO(bugs.webrtc.org/15782):
// This is an ugly solution. The way things should work is for the
// `RtpFrameReferenceFinder` to stash assembled frames until the keyframe with
// the relevant template structure has been received, but unfortunately the
// `frame_transformer_delegate_` is called before the frames are inserted into
// the `RtpFrameReferenceFinder`, and it expects the dependency descriptor to
// be parsed at that stage.
if (parse_and_insert(packet)) {
if (stashed_packets_.size() == 100) {
stashed_packets_.clear();
}
stashed_packets_.push_back(packet);
} else {
for (auto it = stashed_packets_.begin(); it != stashed_packets_.end();) {
if (parse_and_insert(*it)) {
++it; // keep in the stash.
} else {
it = stashed_packets_.erase(it);
}
}
}
}
void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader(

View File

@ -133,9 +133,11 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
void OnRtpPacket(const RtpPacketReceived& packet) override;
// Public only for tests.
void OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload,
// Returns true if the packet should be stashed and retried at a later stage.
bool OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload,
const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video);
const RTPVideoHeader& video,
int times_nacked);
// Implements RecoveredPacketReceiver.
void OnRecoveredPacket(const RtpPacketReceived& packet) override;
@ -279,6 +281,7 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
RTC_GUARDED_BY(packet_sequence_checker_);
};
enum ParseGenericDependenciesResult {
kStashPacket,
kDropPacket,
kHasGenericDescriptor,
kNoGenericDescriptor
@ -430,6 +433,8 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender,
RTC_GUARDED_BY(packet_sequence_checker_);
std::map<int64_t, RtpPacketInfo> packet_infos_
RTC_GUARDED_BY(packet_sequence_checker_);
std::vector<RtpPacketReceived> stashed_packets_
RTC_GUARDED_BY(packet_sequence_checker_);
Timestamp next_keyframe_request_for_missing_video_structure_ =
Timestamp::MinusInfinity();

View File

@ -368,7 +368,7 @@ TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrame) {
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
}
TEST_F(RtpVideoStreamReceiver2Test, SetProtectionPayloadTypes) {
@ -407,7 +407,7 @@ TEST_F(RtpVideoStreamReceiver2Test, PacketInfoIsPropagatedIntoVideoFrames) {
ElementsAre(kAbsoluteCaptureTimestamp));
}));
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
}
TEST_F(RtpVideoStreamReceiver2Test,
@ -436,7 +436,7 @@ TEST_F(RtpVideoStreamReceiver2Test,
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
// Rtp packet without absolute capture time.
rtp_packet = RtpPacketReceived(&extension_map);
@ -453,7 +453,7 @@ TEST_F(RtpVideoStreamReceiver2Test,
EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1));
}));
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
}
TEST_F(RtpVideoStreamReceiver2Test,
@ -508,7 +508,7 @@ TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrameBitstreamError) {
EXPECT_CALL(mock_on_complete_frame_callback_,
DoOnCompleteFrameFailBitstream(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
}
class RtpVideoStreamReceiver2TestH264
@ -536,7 +536,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, InBandSpsPps) {
mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
sps_data.size());
rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
sps_video_header);
sps_video_header, 0);
rtc::CopyOnWriteBuffer pps_data;
RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
@ -549,7 +549,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, InBandSpsPps) {
mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
pps_data.size());
rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
pps_video_header);
pps_video_header, 0);
rtc::CopyOnWriteBuffer idr_data;
RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
@ -566,7 +566,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, InBandSpsPps) {
idr_data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
idr_video_header);
idr_video_header, 0);
}
TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
@ -607,7 +607,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
}
TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
@ -633,7 +633,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
sps_data.size());
rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
sps_video_header);
sps_video_header, 0);
rtc::CopyOnWriteBuffer pps_data;
RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
@ -646,7 +646,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
pps_data.size());
rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
pps_video_header);
pps_video_header, 0);
rtc::CopyOnWriteBuffer idr_data;
RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
@ -665,7 +665,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_TRUE(frame->is_keyframe()); });
rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
idr_video_header);
idr_video_header, 0);
mock_on_complete_frame_callback_.ClearExpectedBitstream();
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
@ -676,7 +676,7 @@ TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_FALSE(frame->is_keyframe()); });
rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
idr_video_header);
idr_video_header, 0);
}
TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) {
@ -694,26 +694,26 @@ TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) {
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
rtp_packet.SetSequenceNumber(3);
rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
video_header);
video_header, 0);
rtp_packet.SetSequenceNumber(4);
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
rtp_packet.SetSequenceNumber(6);
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_packet.SetSequenceNumber(5);
rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
video_header);
video_header, 0);
}
TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeIfFirstFrameIsDelta) {
@ -725,7 +725,7 @@ TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeIfFirstFrameIsDelta) {
GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
}
@ -744,12 +744,12 @@ TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeWhenPacketBufferGetsFull) {
while (rtp_packet.SequenceNumber() - start_sequence_number <
kPacketBufferMaxSize) {
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
}
rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
video_header);
video_header, 0);
EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(1));
}
@ -1144,6 +1144,103 @@ TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
EXPECT_THAT(rtcp_packet_parser_.pli()->num_packets(), Eq(2));
}
// Verifies that a packet arriving before the scalability structure it
// depends on is stashed and successfully retried once a keyframe carrying
// that structure is received.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
RetryStashedPacketsAfterReceivingScalabilityStructure) {
FrameDependencyStructure stream_structure1 = CreateStreamStructure();
FrameDependencyStructure stream_structure2 = CreateStreamStructure();
// Make sure template ids for these two structures do not collide:
// adjust structure_id (that is also used as template id offset).
stream_structure1.structure_id = 13;
stream_structure2.structure_id =
stream_structure1.structure_id + stream_structure1.templates.size();
DependencyDescriptor keyframe1_descriptor;
keyframe1_descriptor.attached_structure =
std::make_unique<FrameDependencyStructure>(stream_structure1);
keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
keyframe1_descriptor.frame_number = 1;
DependencyDescriptor keyframe2_descriptor;
keyframe2_descriptor.attached_structure =
std::make_unique<FrameDependencyStructure>(stream_structure2);
keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
keyframe2_descriptor.frame_number = 2;
// The delta frame uses a template from stream_structure2 but carries no
// attached structure of its own.
DependencyDescriptor deltaframe_descriptor;
deltaframe_descriptor.frame_dependencies = stream_structure2.templates[1];
deltaframe_descriptor.frame_number = 3;
// All three frames are expected to be delivered in frame number order,
// even though the delta frame is injected out of order below.
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 1); })
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 2); })
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 3); });
// The delta frame arrives before keyframe2, which carries the structure the
// delta frame depends on; the receiver should stash it and retry it after
// keyframe2 is received.
InjectPacketWith(stream_structure1, keyframe1_descriptor);
InjectPacketWith(stream_structure2, deltaframe_descriptor);
InjectPacketWith(stream_structure2, keyframe2_descriptor);
}
// Verifies that stashed packets are retried whenever a new scalability
// structure is received, including packets that depend on a structure that
// arrives even later: deltaframe3 must stay stashed across the arrival of
// keyframe2 (an earlier structure) and only be delivered after keyframe3.
TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
RetryStashedPacketsAfterReceivingEarlierScalabilityStructure) {
FrameDependencyStructure stream_structure1 = CreateStreamStructure();
FrameDependencyStructure stream_structure2 = CreateStreamStructure();
FrameDependencyStructure stream_structure3 = CreateStreamStructure();
// Make sure template ids for these two structures do not collide:
// adjust structure_id (that is also used as template id offset).
stream_structure1.structure_id = 13;
stream_structure2.structure_id =
stream_structure1.structure_id + stream_structure1.templates.size();
stream_structure3.structure_id =
stream_structure2.structure_id + stream_structure2.templates.size();
DependencyDescriptor keyframe1_descriptor;
keyframe1_descriptor.attached_structure =
std::make_unique<FrameDependencyStructure>(stream_structure1);
keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
keyframe1_descriptor.frame_number = 1;
DependencyDescriptor keyframe2_descriptor;
keyframe2_descriptor.attached_structure =
std::make_unique<FrameDependencyStructure>(stream_structure2);
keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
keyframe2_descriptor.frame_number = 2;
// Delta frame depending on stream_structure2 (no attached structure).
DependencyDescriptor deltaframe2_descriptor;
deltaframe2_descriptor.frame_dependencies = stream_structure2.templates[1];
deltaframe2_descriptor.frame_number = 3;
DependencyDescriptor keyframe3_descriptor;
keyframe3_descriptor.attached_structure =
std::make_unique<FrameDependencyStructure>(stream_structure3);
keyframe3_descriptor.frame_dependencies = stream_structure3.templates[0];
keyframe3_descriptor.frame_number = 4;
// Delta frame depending on stream_structure3 (no attached structure).
DependencyDescriptor deltaframe3_descriptor;
deltaframe3_descriptor.frame_dependencies = stream_structure3.templates[1];
deltaframe3_descriptor.frame_number = 5;
// All five frames are expected to be delivered in frame number order.
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 1); })
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 2); })
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 3); })
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 4); })
.WillOnce(
[&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 5); });
// Both delta frames arrive before the keyframes that carry the structures
// they depend on; each should be stashed and retried as the structures
// arrive, in two separate retry rounds.
InjectPacketWith(stream_structure1, keyframe1_descriptor);
InjectPacketWith(stream_structure2, deltaframe2_descriptor);
InjectPacketWith(stream_structure3, deltaframe3_descriptor);
InjectPacketWith(stream_structure2, keyframe2_descriptor);
InjectPacketWith(stream_structure3, keyframe3_descriptor);
}
TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) {
rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
rtc::make_ref_counted<testing::NiceMock<MockFrameTransformer>>();
@ -1166,7 +1263,7 @@ TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) {
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
EXPECT_CALL(*mock_frame_transformer, Transform(_));
receiver->OnReceivedPayloadData(data, rtp_packet, video_header);
receiver->OnReceivedPayloadData(data, rtp_packet, video_header, 0);
EXPECT_CALL(*mock_frame_transformer,
UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
@ -1233,7 +1330,7 @@ TEST_P(RtpVideoStreamReceiver2TestPlayoutDelay, PlayoutDelay) {
EXPECT_EQ(frame->EncodedImage().PlayoutDelay(), expected_playout_delay);
}));
rtp_video_stream_receiver_->OnReceivedPayloadData(
received_packet.PayloadBuffer(), received_packet, video_header);
received_packet.PayloadBuffer(), received_packet, video_header, 0);
}
} // namespace webrtc