Move frame_type member from RtpDepacketizer::ParsedPayload to RTPVideoHeader

RTPVideoHeader is itself a member of ParsedPayload, so the field moves
inward rather than away. This cleanup also prepares for dropping
WebRtcRTPHeader::frameType (or deleting WebRtcRTPHeader entirely), now
that the frame type is a video-specific member.


Tbr: kwiberg@webrtc.org # Comment change in modules/include/
Bug: None
Change-Id: I5c1f3f981f0d750713fc9b9b145278150fe32b5d
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/133024
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Åsa Persson <asapersson@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27740}
This commit is contained in:
Niels Möller 2019-04-24 09:41:16 +02:00 committed by Commit Bot
parent a1e4fbb253
commit abbc50e9b2
31 changed files with 200 additions and 187 deletions

View File

@ -31,6 +31,7 @@ struct WebRtcRTPHeader {
RTPHeader header;
// Used for video only.
// TODO(nisse): Delete, now included on RTPVideoHeader.
VideoFrameType frameType;
// NTP time of the capture time in local timebase in milliseconds.
int64_t ntp_time_ms;

View File

@ -284,6 +284,7 @@ rtc_source_set("rtp_video_header") {
deps = [
"../../:webrtc_common",
"../../api/video:video_frame",
"../../api/video:video_frame_type",
"../../modules/video_coding:codec_globals_headers",
"//third_party/abseil-cpp/absl/container:inlined_vector",
"//third_party/abseil-cpp/absl/types:optional",

View File

@ -70,14 +70,13 @@ class RtpDepacketizer {
// TODO(bugs.webrtc.org/10397): These are temporary accessors, to enable
// move of the frame_type member to inside RTPVideoHeader, without breaking
// downstream code.
VideoFrameType FrameType() const { return frame_type; }
void SetFrameType(VideoFrameType type) { frame_type = type; }
VideoFrameType FrameType() const { return video_header().frame_type; }
void SetFrameType(VideoFrameType type) { video_header().frame_type = type; }
RTPVideoHeader video;
const uint8_t* payload;
size_t payload_length;
VideoFrameType frame_type;
};
static RtpDepacketizer* Create(VideoCodecType type);

View File

@ -435,7 +435,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
nalu_start_offsets.push_back(0);
}
h264_header.nalu_type = nal_type;
parsed_payload->frame_type = VideoFrameType::kVideoFrameDelta;
parsed_payload->video_header().frame_type = VideoFrameType::kVideoFrameDelta;
nalu_start_offsets.push_back(length_ + kLengthFieldSize); // End offset.
for (size_t i = 0; i < nalu_start_offsets.size() - 1; ++i) {
@ -505,7 +505,8 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
} else {
RTC_LOG(LS_WARNING) << "Failed to parse SPS id from SPS slice.";
}
parsed_payload->frame_type = VideoFrameType::kVideoFrameKey;
parsed_payload->video_header().frame_type =
VideoFrameType::kVideoFrameKey;
break;
}
case H264::NaluType::kPps: {
@ -523,7 +524,8 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
break;
}
case H264::NaluType::kIdr:
parsed_payload->frame_type = VideoFrameType::kVideoFrameKey;
parsed_payload->video_header().frame_type =
VideoFrameType::kVideoFrameKey;
RTC_FALLTHROUGH();
case H264::NaluType::kSlice: {
absl::optional<uint32_t> pps_id = PpsParser::ParsePpsIdFromSlice(
@ -598,9 +600,10 @@ bool RtpDepacketizerH264::ParseFuaNalu(
}
if (original_nal_type == H264::NaluType::kIdr) {
parsed_payload->frame_type = VideoFrameType::kVideoFrameKey;
parsed_payload->video_header().frame_type = VideoFrameType::kVideoFrameKey;
} else {
parsed_payload->frame_type = VideoFrameType::kVideoFrameDelta;
parsed_payload->video_header().frame_type =
VideoFrameType::kVideoFrameDelta;
}
parsed_payload->video_header().width = 0;
parsed_payload->video_header().height = 0;

View File

@ -608,7 +608,7 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264SingleNalu, payload.h264().packetization_type);
@ -623,7 +623,7 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264SingleNalu, payload.h264().packetization_type);
@ -652,7 +652,7 @@ TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
H264ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
const RTPVideoHeaderH264& h264 = payload.h264();
@ -683,7 +683,7 @@ TEST_F(RtpDepacketizerH264Test, TestStapANaluSpsWithResolution) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264StapA, payload.h264().packetization_type);
@ -810,7 +810,8 @@ TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet, sizeof(packet));
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264StapA, payload.h264().packetization_type);
@ -849,7 +850,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
// has been replaced by the original nal header.
ASSERT_TRUE(depacketizer_->Parse(&payload, packet1, sizeof(packet1)));
ExpectPacket(&payload, kExpected1, sizeof(kExpected1));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
const RTPVideoHeaderH264& h264 = payload.h264();
@ -865,7 +866,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
payload = H264ParsedPayload();
ASSERT_TRUE(depacketizer_->Parse(&payload, packet2, sizeof(packet2)));
ExpectPacket(&payload, kExpected2, sizeof(kExpected2));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
{
@ -879,7 +880,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
payload = H264ParsedPayload();
ASSERT_TRUE(depacketizer_->Parse(&payload, packet3, sizeof(packet3)));
ExpectPacket(&payload, kExpected3, sizeof(kExpected3));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
{
@ -936,7 +937,8 @@ TEST_F(RtpDepacketizerH264Test, TestSeiPacket) {
H264ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
const RTPVideoHeaderH264& h264 = payload.h264();
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
payload.video_header().frame_type);
EXPECT_EQ(kH264SingleNalu, h264.packetization_type);
EXPECT_EQ(kSei, h264.nalu_type);
ASSERT_EQ(1u, h264.nalus_length);

View File

@ -103,7 +103,7 @@ bool RtpDepacketizerGeneric::Parse(ParsedPayload* parsed_payload,
uint8_t generic_header = *payload_data++;
--payload_data_length;
parsed_payload->frame_type =
parsed_payload->video_header().frame_type =
((generic_header & RtpFormatVideoGeneric::kKeyFrameBit) != 0)
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;

View File

@ -130,7 +130,8 @@ int ParseVP8Extension(RTPVideoHeaderVP8* vp8,
int ParseVP8FrameSize(RtpDepacketizer::ParsedPayload* parsed_payload,
const uint8_t* data,
size_t data_length) {
if (parsed_payload->frame_type != VideoFrameType::kVideoFrameKey) {
if (parsed_payload->video_header().frame_type !=
VideoFrameType::kVideoFrameKey) {
// Included in payload header for I-frames.
return 0;
}
@ -356,11 +357,12 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
// Read P bit from payload header (only at beginning of first partition).
if (beginning_of_partition && partition_id == 0) {
parsed_payload->frame_type = (*payload_data & 0x01)
? VideoFrameType::kVideoFrameDelta
: VideoFrameType::kVideoFrameKey;
parsed_payload->video_header().frame_type =
(*payload_data & 0x01) ? VideoFrameType::kVideoFrameDelta
: VideoFrameType::kVideoFrameKey;
} else {
parsed_payload->frame_type = VideoFrameType::kVideoFrameDelta;
parsed_payload->video_header().frame_type =
VideoFrameType::kVideoFrameDelta;
}
if (ParseVP8FrameSize(parsed_payload, payload_data, payload_data_length) !=

View File

@ -198,7 +198,8 @@ TEST_F(RtpDepacketizerVp8Test, BasicHeader) {
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
VerifyBasicHeader(&payload.video_header(), 0, 1, 4);
VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
@ -218,7 +219,8 @@ TEST_F(RtpDepacketizerVp8Test, PictureID) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet + kHeaderLength1,
sizeof(packet) - kHeaderLength1);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
VerifyBasicHeader(&payload.video_header(), 1, 0, 0);
VerifyExtensions(&payload.video_header(), kPictureId, kNoTl0PicIdx,
@ -249,7 +251,7 @@ TEST_F(RtpDepacketizerVp8Test, Tl0PicIdx) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
VerifyBasicHeader(&payload.video_header(), 0, 1, 0);
VerifyExtensions(&payload.video_header(), kNoPictureId, kTl0PicIdx,
@ -267,7 +269,8 @@ TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx, 2,
@ -289,7 +292,8 @@ TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
@ -310,7 +314,8 @@ TEST_F(RtpDepacketizerVp8Test, MultipleExtensions) {
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
ExpectPacket(&payload, packet + kHeaderLength,
sizeof(packet) - kHeaderLength);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta,
payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
VerifyExtensions(&payload.video_header(), (17 << 8) + 17, 42, 1, 17);
@ -351,7 +356,7 @@ TEST_F(RtpDepacketizerVp8Test, TestWithPacketizer) {
depacketizer_->Parse(&payload, rtp_payload.data(), rtp_payload.size()));
auto vp8_payload = rtp_payload.subview(kHeaderLength);
ExpectPacket(&payload, vp8_payload.data(), vp8_payload.size());
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, payload.video_header().frame_type);
EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
VerifyBasicHeader(&payload.video_header(), 1, 1, 0);
VerifyExtensions(&payload.video_header(), input_header.pictureId,

View File

@ -607,7 +607,7 @@ bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
parsed_payload->video_header().simulcastIdx = 0;
parsed_payload->video_header().codec = kVideoCodecVP9;
parsed_payload->frame_type =
parsed_payload->video_header().frame_type =
p_bit ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey;
auto& vp9_header = parsed_payload->video_header()

View File

@ -749,7 +749,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseFirstPacketInKeyFrame) {
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
EXPECT_EQ(VideoFrameType::kVideoFrameKey, parsed.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameKey, parsed.video_header().frame_type);
EXPECT_TRUE(parsed.video_header().is_first_packet_in_frame);
}
@ -759,7 +759,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, parsed.frame_type);
EXPECT_EQ(VideoFrameType::kVideoFrameDelta, parsed.video_header().frame_type);
EXPECT_FALSE(parsed.video_header().is_first_packet_in_frame);
}

View File

@ -19,6 +19,7 @@
#include "api/video/video_codec_type.h"
#include "api/video/video_content_type.h"
#include "api/video/video_frame_marking.h"
#include "api/video/video_frame_type.h"
#include "api/video/video_rotation.h"
#include "api/video/video_timing.h"
#include "common_types.h" // NOLINT(build/include)
@ -53,6 +54,7 @@ struct RTPVideoHeader {
absl::optional<GenericDescriptorInfo> generic;
VideoFrameType frame_type = VideoFrameType::kEmptyFrame;
uint16_t width = 0;
uint16_t height = 0;
VideoRotation rotation = VideoRotation::kVideoRotation_0;

View File

@ -37,7 +37,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet.video_header.is_first_packet_in_frame = true;
packet.timestamp = 1;
packet.seqNum = 0xffff;
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.codec = kVideoCodecVP8;
auto& vp8_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
@ -49,12 +49,12 @@ TEST(TestDecodingState, FrameContinuity) {
// Always start with a key frame.
dec_state.Reset();
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_LE(0, frame_key.InsertPacket(packet, 0, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame_key));
dec_state.SetState(&frame);
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
// Use pictureId
packet.video_header.is_first_packet_in_frame = false;
vp8_header.pictureId = 0x0002;
@ -170,7 +170,7 @@ TEST(TestDecodingState, UpdateOldPacket) {
VCMPacket packet;
packet.timestamp = 1;
packet.seqNum = 1;
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -185,14 +185,14 @@ TEST(TestDecodingState, UpdateOldPacket) {
// Now insert empty packet belonging to the same frame.
packet.timestamp = 1;
packet.seqNum = 2;
packet.frameType = VideoFrameType::kEmptyFrame;
packet.video_header.frame_type = VideoFrameType::kEmptyFrame;
packet.sizeBytes = 0;
dec_state.UpdateOldPacket(&packet);
EXPECT_EQ(dec_state.sequence_num(), 2);
// Now insert delta packet belonging to the same frame.
packet.timestamp = 1;
packet.seqNum = 3;
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.sizeBytes = 1400;
dec_state.UpdateOldPacket(&packet);
EXPECT_EQ(dec_state.sequence_num(), 3);
@ -200,7 +200,7 @@ TEST(TestDecodingState, UpdateOldPacket) {
// sequence number.
packet.timestamp = 0;
packet.seqNum = 4;
packet.frameType = VideoFrameType::kEmptyFrame;
packet.video_header.frame_type = VideoFrameType::kEmptyFrame;
packet.sizeBytes = 0;
dec_state.UpdateOldPacket(&packet);
EXPECT_EQ(dec_state.sequence_num(), 3);
@ -214,7 +214,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
// tl0PicIdx 0, temporal id 0.
VCMFrameBuffer frame;
VCMPacket packet;
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
@ -265,7 +265,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
// Insert key frame - should update sync value.
// A key frame is always a base layer.
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet.video_header.is_first_packet_in_frame = true;
packet.timestamp = 5;
packet.seqNum = 5;
@ -279,7 +279,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
// After sync, a continuous PictureId is required
// (continuous base layer is not enough )
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.timestamp = 6;
packet.seqNum = 6;
vp8_header.tl0PicIdx = 3;
@ -289,7 +289,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.is_first_packet_in_frame = true;
packet.timestamp = 8;
packet.seqNum = 8;
@ -304,7 +304,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
// Insert a non-ref frame - should update sync value.
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.is_first_packet_in_frame = true;
packet.timestamp = 9;
packet.seqNum = 9;
@ -324,7 +324,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
// Base layer.
frame.Reset();
dec_state.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.is_first_packet_in_frame = true;
packet.markerBit = 1;
packet.timestamp = 0;
@ -338,7 +338,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
EXPECT_TRUE(dec_state.full_sync());
// Layer 2 - 2 packets (insert one, lose one).
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.is_first_packet_in_frame = true;
packet.markerBit = 0;
packet.timestamp = 1;
@ -351,7 +351,7 @@ TEST(TestDecodingState, MultiLayerBehavior) {
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Layer 1
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.is_first_packet_in_frame = true;
packet.markerBit = 1;
packet.timestamp = 2;
@ -370,7 +370,7 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
VCMFrameBuffer frame;
VCMPacket packet;
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
@ -389,7 +389,7 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
// Continuous sequence number but discontinuous picture id. This implies a
// a loss and we have to fall back to only decoding the base layer.
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.timestamp += 3000;
++packet.seqNum;
vp8_header.temporalIdx = 1;
@ -425,7 +425,7 @@ TEST(TestDecodingState, PictureIdRepeat) {
VCMDecodingState dec_state;
VCMFrameBuffer frame;
VCMPacket packet;
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
@ -478,7 +478,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
frame_data.rolling_average_packets_per_frame = -1;
// Key frame as first frame
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -492,7 +492,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
// Ref to 11, continuous
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_hdr.picture_id = 12;
vp9_hdr.num_ref_pics = 1;
vp9_hdr.pid_diff[0] = 1;
@ -522,14 +522,14 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
frame_data.rolling_average_packets_per_frame = -1;
// Key frame as first frame
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
// Ref to 10, continuous
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_hdr.picture_id = 15;
vp9_hdr.num_ref_pics = 1;
vp9_hdr.pid_diff[0] = 5;
@ -578,23 +578,23 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
frame_data.rolling_average_packets_per_frame = -1;
// Key frame as first frame
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Delta frame as first frame
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
// Key frame then delta frame
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
dec_state.SetState(&frame);
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_hdr.num_ref_pics = 1;
vp9_hdr.picture_id = 15;
vp9_hdr.pid_diff[0] = 5;
@ -638,7 +638,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
// Key Frame, continuous
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
vp9_hdr.num_ref_pics = 0;
EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
@ -647,7 +647,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
// Frame at last index, ref to KF, continuous
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
vp9_hdr.num_ref_pics = 1;
vp9_hdr.pid_diff[0] = 1;
@ -683,7 +683,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
// Key frame, continuous
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
vp9_hdr.picture_id = 25;
vp9_hdr.num_ref_pics = 0;
EXPECT_LE(0, frame.InsertPacket(packet, 0, frame_data));
@ -692,7 +692,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
// Ref to KF, continuous
frame.Reset();
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_hdr.picture_id = 26;
vp9_hdr.num_ref_pics = 1;
vp9_hdr.pid_diff[0] = 1;

View File

@ -94,7 +94,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
// We only take the ntp timestamp of the first packet of a frame.
ntp_time_ms_ = packet.ntp_time_ms_;
_codec = packet.codec();
if (packet.frameType != VideoFrameType::kEmptyFrame) {
if (packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
// first media packet
SetState(kStateIncomplete);
}

View File

@ -38,7 +38,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
RTC_CHECK(first_packet);
// EncodedFrame members
frame_type_ = first_packet->frameType;
frame_type_ = first_packet->video_header.frame_type;
codec_type_ = first_packet->codec();
// TODO(philipel): Remove when encoded image is replaced by EncodedFrame.
@ -48,7 +48,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
_payloadType = first_packet->payloadType;
SetTimestamp(first_packet->timestamp);
ntp_time_ms_ = first_packet->ntp_time_ms_;
_frameType = first_packet->frameType;
_frameType = first_packet->video_header.frame_type;
// Setting frame's playout delays to the same values
// as of the first packet's.

View File

@ -413,7 +413,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
// Empty packets may bias the jitter estimate (lacking size component),
// therefore don't let empty packet trigger the following updates:
if (packet.frameType != VideoFrameType::kEmptyFrame) {
if (packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
if (waiting_for_completion_.timestamp == packet.timestamp) {
// This can get bad if we have a lot of duplicate packets,
// we will then count some packet multiple times.
@ -446,7 +446,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
frame->IncrementNackCount();
}
if (!UpdateNackList(packet.seqNum) &&
packet.frameType != VideoFrameType::kVideoFrameKey) {
packet.video_header.frame_type != VideoFrameType::kVideoFrameKey) {
buffer_state = kFlushIndicator;
}

View File

@ -65,8 +65,8 @@ class TestBasicJitterBuffer : public ::testing::Test {
rtp_header.markerBit = true;
video_header.codec = kVideoCodecGeneric;
video_header.is_first_packet_in_frame = true;
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_.reset(new VCMPacket(data_, size_, rtp_header, video_header,
VideoFrameType::kVideoFrameDelta,
/*ntp_time_ms=*/0));
}
@ -241,7 +241,7 @@ TEST_F(TestBasicJitterBuffer, StopRunning) {
TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
// Always start with a complete key frame when not allowing errors.
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->timestamp += 123 * 90;
@ -257,7 +257,7 @@ TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
}
TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
@ -284,7 +284,7 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
}
TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
@ -328,7 +328,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
// Always start with a complete key frame.
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
@ -342,7 +342,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
++seq_num_;
packet_->seqNum = seq_num_;
packet_->markerBit = false;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->timestamp += 33 * 90;
EXPECT_EQ(kIncomplete,
@ -385,7 +385,7 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
// Insert the "first" packet last.
seq_num_ += 100;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = false;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -430,7 +430,7 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
}
TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
@ -456,7 +456,7 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
seq_num_ -= 3;
timestamp_ -= 33 * 90;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
@ -491,7 +491,7 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
@ -505,7 +505,7 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
// Now send in a complete delta frame (Frame C), but with a sequence number
// gap. No pic index either, so no temporal scalability cheating :)
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
// Leave a gap of 2 sequence numbers and two frames.
packet_->seqNum = seq_num_ + 3;
packet_->timestamp = timestamp_ + (66 * 90);
@ -540,8 +540,9 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
rtp_header.timestamp = timestamp_ + (33 * 90);
rtp_header.markerBit = false;
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kEmptyFrame;
VCMPacket empty_packet(data_, 0, rtp_header, video_header,
VideoFrameType::kEmptyFrame, /*ntp_time_ms=*/0);
/*ntp_time_ms=*/0);
EXPECT_EQ(kOldPacket,
jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
empty_packet.seqNum += 1;
@ -555,7 +556,7 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
}
TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
@ -598,7 +599,7 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
}
TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -621,7 +622,7 @@ TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
for (uint16_t i = 1; i <= 3; ++i) {
packet_->seqNum = seq_num_ + i;
packet_->timestamp = timestamp_ + (i * 33) * 90;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_EQ(kCompleteSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_EQ(i + 1, jitter_buffer_->num_packets());
@ -674,7 +675,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->seqNum = 65485;
packet_->timestamp = 1000;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
vp9_header.picture_id = 5;
vp9_header.tl0_pic_idx = 200;
vp9_header.temporal_idx = 0;
@ -686,7 +687,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
// Insert next temporal layer 0.
packet_->seqNum = 65489;
packet_->timestamp = 13000;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_header.picture_id = 9;
vp9_header.tl0_pic_idx = 201;
vp9_header.temporal_idx = 0;
@ -729,7 +730,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_header.picture_id = 6;
vp9_header.temporal_idx = 2;
vp9_header.temporal_up_switch = true;
@ -737,7 +738,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->seqNum = 65487;
packet_->timestamp = 9000;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_header.picture_id = 7;
vp9_header.temporal_idx = 1;
vp9_header.temporal_up_switch = true;
@ -746,7 +747,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
// Insert first frame with SS data.
packet_->seqNum = 65485;
packet_->timestamp = 3000;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.width = 352;
packet_->video_header.height = 288;
vp9_header.picture_id = 5;
@ -806,7 +807,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->markerBit = false;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_header.spatial_idx = 0;
vp9_header.picture_id = 6;
vp9_header.temporal_idx = 1;
@ -816,7 +817,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->video_header.is_first_packet_in_frame = false;
packet_->markerBit = true;
packet_->seqNum = 65487;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
vp9_header.spatial_idx = 1;
vp9_header.picture_id = 6;
vp9_header.temporal_idx = 1;
@ -827,7 +828,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->markerBit = true;
packet_->seqNum = 65485;
packet_->timestamp = 3000;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
vp9_header.spatial_idx = 1;
vp9_header.picture_id = 5;
vp9_header.temporal_idx = 0;
@ -838,7 +839,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = 65484;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.width = 352;
packet_->video_header.height = 288;
vp9_header.spatial_idx = 0;
@ -867,7 +868,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
}
TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
@ -901,7 +902,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
auto& h264_header =
packet_->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
packet_->timestamp = timestamp_;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->video_header.codec = kVideoCodecH264;
@ -920,7 +921,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
packet_->timestamp = timestamp_;
++seq_num_;
packet_->seqNum = seq_num_;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->video_header.codec = kVideoCodecH264;
@ -938,7 +939,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
++seq_num_;
packet_->seqNum = seq_num_;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = false;
packet_->markerBit = true;
packet_->video_header.codec = kVideoCodecH264;
@ -959,7 +960,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
packet_->timestamp = timestamp_;
++seq_num_;
packet_->seqNum = seq_num_;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->video_header.codec = kVideoCodecH264;
@ -978,7 +979,7 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
seq_num_ = 0xfff0;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
@ -1028,7 +1029,7 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
// Insert "first" packet last seqnum.
seq_num_ = 10;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = false;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -1081,7 +1082,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
// t = 3000 t = 2000
seq_num_ = 2;
timestamp_ = 3000;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->timestamp = timestamp_;
@ -1099,7 +1100,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
seq_num_--;
timestamp_ = 2000;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -1116,7 +1117,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
seq_num_ = 2;
timestamp_ = 3000;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -1137,7 +1138,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
seq_num_--;
timestamp_ = 0xffffff00;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -1154,7 +1155,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
// t = 0xffffff00 t = 33*90
timestamp_ = 0xffffff00;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
@ -1181,7 +1182,7 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
seq_num_++;
timestamp_ += 33 * 90;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
@ -1214,7 +1215,7 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
// t = 0xffffff00 t = 2700
timestamp_ = 0xffffff00;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->timestamp = timestamp_;
@ -1227,7 +1228,7 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
// Insert next frame.
seq_num_++;
timestamp_ = 2700;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -1257,7 +1258,7 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
seq_num_ = 2;
timestamp_ = 2700;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -1270,7 +1271,7 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
// Insert second frame
seq_num_--;
timestamp_ = 0xffffff00;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->seqNum = seq_num_;
@ -1355,7 +1356,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
if (loop == 50) {
first_key_frame_timestamp = packet_->timestamp;
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
}
// Insert frame.
@ -1399,7 +1400,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
packet_->frameType = VideoFrameType::kEmptyFrame;
packet_->video_header.frame_type = VideoFrameType::kEmptyFrame;
EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
@ -1411,7 +1412,7 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
// timestamp.
// Start with a complete key frame - insert and decode.
jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
packet_->frameType = VideoFrameType::kVideoFrameKey;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet_->video_header.is_first_packet_in_frame = true;
packet_->markerBit = true;
bool retransmitted = false;
@ -1424,7 +1425,7 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
packet_->seqNum += 2;
packet_->timestamp += 33 * 90;
packet_->frameType = VideoFrameType::kVideoFrameDelta;
packet_->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_->video_header.is_first_packet_in_frame = false;
packet_->markerBit = false;

View File

@ -23,7 +23,6 @@ VCMPacket::VCMPacket()
sizeBytes(0),
markerBit(false),
timesNacked(-1),
frameType(VideoFrameType::kEmptyFrame),
completeNALU(kNaluUnset),
insertStartCode(false),
video_header(),
@ -35,7 +34,6 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
size_t size,
const RTPHeader& rtp_header,
const RTPVideoHeader& videoHeader,
VideoFrameType frame_type,
int64_t ntp_time_ms)
: payloadType(rtp_header.payloadType),
timestamp(rtp_header.timestamp),
@ -45,7 +43,6 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
sizeBytes(size),
markerBit(rtp_header.markerBit),
timesNacked(-1),
frameType(frame_type),
completeNALU(kNaluIncomplete),
insertStartCode(videoHeader.codec == kVideoCodecH264 &&
videoHeader.is_first_packet_in_frame),

View File

@ -39,7 +39,6 @@ class VCMPacket {
size_t size,
const RTPHeader& rtp_header,
const RTPVideoHeader& video_header,
VideoFrameType frame_type,
int64_t ntp_time_ms);
~VCMPacket();
@ -65,8 +64,6 @@ class VCMPacket {
bool markerBit;
int timesNacked;
VideoFrameType frameType;
VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
bool insertStartCode; // True if a start code should be inserted before this
// packet.

View File

@ -125,7 +125,7 @@ bool PacketBuffer::InsertPacket(VCMPacket* packet) {
int64_t now_ms = clock_->TimeInMilliseconds();
last_received_packet_ms_ = now_ms;
if (packet->frameType == VideoFrameType::kVideoFrameKey)
if (packet->video_header.frame_type == VideoFrameType::kVideoFrameKey)
last_received_keyframe_packet_ms_ = now_ms;
found_frames = FindFrames(seq_num);
@ -377,10 +377,10 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
const size_t first_packet_index = start_seq_num % size_;
RTC_CHECK_LT(first_packet_index, size_);
if (is_h264_keyframe) {
data_buffer_[first_packet_index].frameType =
data_buffer_[first_packet_index].video_header.frame_type =
VideoFrameType::kVideoFrameKey;
} else {
data_buffer_[first_packet_index].frameType =
data_buffer_[first_packet_index].video_header.frame_type =
VideoFrameType::kVideoFrameDelta;
}

View File

@ -82,8 +82,9 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
VCMPacket packet;
packet.video_header.codec = kVideoCodecGeneric;
packet.seqNum = seq_num_start;
packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = keyframe
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
ref_packet_buffer_->InsertPacket(&packet);
packet.seqNum = seq_num_end;
@ -107,8 +108,9 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
packet.seqNum = seq_num_start;
packet.video_header.is_last_packet_in_frame =
(seq_num_start == seq_num_end);
packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = keyframe
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
auto& vp8_header =
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.pictureId = pid % (1 << 15);
@ -146,8 +148,9 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
packet.seqNum = seq_num_start;
packet.video_header.is_last_packet_in_frame =
(seq_num_start == seq_num_end);
packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = keyframe
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
vp9_header.flexible_mode = false;
vp9_header.picture_id = pid % (1 << 15);
vp9_header.temporal_idx = tid;
@ -189,8 +192,9 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
packet.seqNum = seq_num_start;
packet.video_header.is_last_packet_in_frame =
(seq_num_start == seq_num_end);
packet.frameType = keyframe ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = keyframe
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
vp9_header.inter_layer_predicted = inter;
vp9_header.flexible_mode = true;
vp9_header.picture_id = pid % (1 << 15);

View File

@ -426,7 +426,7 @@ bool VCMSessionInfo::HaveLastPacket() const {
int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
uint8_t* frame_buffer,
const FrameData& frame_data) {
if (packet.frameType == VideoFrameType::kEmptyFrame) {
if (packet.video_header.frame_type == VideoFrameType::kEmptyFrame) {
// Update sequence number of an empty packet.
// Only media packets are inserted into the packet list.
InformOfEmptyPacket(packet.seqNum);
@ -451,7 +451,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
return -2;
if (packet.codec() == kVideoCodecH264) {
frame_type_ = packet.frameType;
frame_type_ = packet.video_header.frame_type;
if (packet.is_first_packet_in_frame() &&
(first_packet_seq_num_ == -1 ||
IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum))) {
@ -470,7 +470,7 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
// Should only be set for one packet per session.
if (packet.is_first_packet_in_frame() && first_packet_seq_num_ == -1) {
// The first packet in a frame signals the frame type.
frame_type_ = packet.frameType;
frame_type_ = packet.video_header.frame_type;
// Store the sequence number for the first packet.
first_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (first_packet_seq_num_ != -1 &&
@ -480,10 +480,10 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
"of frame boundaries";
return -3;
} else if (frame_type_ == VideoFrameType::kEmptyFrame &&
packet.frameType != VideoFrameType::kEmptyFrame) {
packet.video_header.frame_type != VideoFrameType::kEmptyFrame) {
// Update the frame type with the type of the first media packet.
// TODO(mikhal): Can this trigger?
frame_type_ = packet.frameType;
frame_type_ = packet.video_header.frame_type;
}
// Track the marker bit, should only be set for one packet per session.

View File

@ -23,7 +23,7 @@ class TestSessionInfo : public ::testing::Test {
memset(packet_buffer_, 0, sizeof(packet_buffer_));
memset(frame_buffer_, 0, sizeof(frame_buffer_));
session_.Reset();
packet_.frameType = VideoFrameType::kVideoFrameDelta;
packet_.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
packet_.sizeBytes = packet_buffer_size();
packet_.dataPtr = packet_buffer_;
packet_.seqNum = 0;
@ -116,7 +116,7 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
packet_.video_header.is_first_packet_in_frame = true;
packet_.seqNum = 0xFFFE;
packet_.sizeBytes = packet_buffer_size();
packet_.frameType = VideoFrameType::kVideoFrameKey;
packet_.video_header.frame_type = VideoFrameType::kVideoFrameKey;
FillPacket(0);
EXPECT_EQ(packet_buffer_size(), static_cast<size_t>(session_.InsertPacket(
packet_, frame_buffer_, frame_data)));
@ -138,7 +138,7 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
packet_.markerBit = true;
packet_.seqNum = 2;
packet_.sizeBytes = 0;
packet_.frameType = VideoFrameType::kEmptyFrame;
packet_.video_header.frame_type = VideoFrameType::kEmptyFrame;
EXPECT_EQ(0, session_.InsertPacket(packet_, frame_buffer_, frame_data));
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
}
@ -309,7 +309,7 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
packet_.video_header.is_first_packet_in_frame = false;
packet_.completeNALU = kNaluComplete;
packet_.frameType = VideoFrameType::kEmptyFrame;
packet_.video_header.frame_type = VideoFrameType::kEmptyFrame;
packet_.sizeBytes = 0;
packet_.seqNum = 0;
packet_.markerBit = false;

View File

@ -59,7 +59,7 @@ VCMPacket StreamGenerator::GeneratePacket(uint16_t sequence_number,
VCMPacket packet;
packet.seqNum = sequence_number;
packet.timestamp = timestamp;
packet.frameType = type;
packet.video_header.frame_type = type;
packet.video_header.is_first_packet_in_frame = first_packet;
packet.markerBit = marker_bit;
packet.sizeBytes = size;

View File

@ -65,8 +65,9 @@ class TestPacketBuffer : public ::testing::Test,
packet.video_header.codec = kVideoCodecGeneric;
packet.timestamp = timestamp;
packet.seqNum = seq_num;
packet.frameType = keyframe == kKeyFrame ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = keyframe == kKeyFrame
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.is_first_packet_in_frame = first == kFirst;
packet.video_header.is_last_packet_in_frame = last == kLast;
packet.sizeBytes = data_size;
@ -163,7 +164,7 @@ TEST_F(TestPacketBuffer, NackCount) {
VCMPacket packet;
packet.video_header.codec = kVideoCodecGeneric;
packet.seqNum = seq_num;
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet.video_header.is_first_packet_in_frame = true;
packet.video_header.is_last_packet_in_frame = false;
packet.timesNacked = 0;
@ -788,7 +789,7 @@ TEST_F(TestPacketBuffer, IncomingCodecChange) {
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
packet.timestamp = 1;
packet.seqNum = 1;
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
packet.video_header.codec = kVideoCodecH264;
@ -803,7 +804,7 @@ TEST_F(TestPacketBuffer, IncomingCodecChange) {
packet.video_header.video_type_header.emplace<RTPVideoHeaderVP8>();
packet.timestamp = 2;
packet.seqNum = 2;
packet.frameType = VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
@ -815,7 +816,7 @@ TEST_F(TestPacketBuffer, TooManyNalusInPacket) {
packet.video_header.codec = kVideoCodecH264;
packet.timestamp = 1;
packet.seqNum = 1;
packet.frameType = VideoFrameType::kVideoFrameKey;
packet.video_header.frame_type = VideoFrameType::kVideoFrameKey;
packet.video_header.is_first_packet_in_frame = true;
packet.video_header.is_last_packet_in_frame = true;
auto& h264_header =

View File

@ -336,8 +336,7 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
payloadLength = 0;
}
const VCMPacket packet(incomingPayload, payloadLength, rtpInfo.header,
rtpInfo.video_header(), rtpInfo.frameType,
rtpInfo.ntp_time_ms);
rtpInfo.video_header(), rtpInfo.ntp_time_ms);
int32_t ret = _receiver.InsertPacket(packet);
// TODO(holmer): Investigate if this somehow should use the key frame

View File

@ -121,14 +121,14 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
header.video_header().video_type_header.emplace<RTPVideoHeaderVP8>();
// Insert one video frame to get one frame decoded.
header.frameType = VideoFrameType::kVideoFrameKey;
header.video_header().frame_type = VideoFrameType::kVideoFrameKey;
header.video_header().is_first_packet_in_frame = true;
header.header.markerBit = true;
InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header);
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
header.frameType = VideoFrameType::kEmptyFrame;
header.video_header().frame_type = VideoFrameType::kEmptyFrame;
header.video_header().is_first_packet_in_frame = false;
header.header.markerBit = false;
// Insert padding frames.
@ -171,9 +171,9 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
// Insert 2 video frames.
for (int j = 0; j < 2; ++j) {
if (i == 0 && j == 0) // First frame should be a key frame.
header.frameType = VideoFrameType::kVideoFrameKey;
header.video_header().frame_type = VideoFrameType::kVideoFrameKey;
else
header.frameType = VideoFrameType::kVideoFrameDelta;
header.video_header().frame_type = VideoFrameType::kVideoFrameDelta;
header.video_header().is_first_packet_in_frame = true;
header.header.markerBit = true;
InsertAndVerifyDecodableFrame(kPayload, kFrameSize, &header);
@ -182,7 +182,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
}
// Insert 2 padding only frames.
header.frameType = VideoFrameType::kEmptyFrame;
header.video_header().frame_type = VideoFrameType::kEmptyFrame;
header.video_header().is_first_packet_in_frame = false;
header.header.markerBit = false;
for (int j = 0; j < 2; ++j) {

View File

@ -96,8 +96,9 @@ class BufferedFrameDecryptorTest
VCMPacket packet;
packet.video_header.codec = kVideoCodecGeneric;
packet.seqNum = seq_num_;
packet.frameType = key_frame ? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.video_header.frame_type = key_frame
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
packet.generic_descriptor = RtpGenericFrameDescriptor();
fake_packet_buffer_->InsertPacket(&packet);
packet.seqNum = seq_num_;

View File

@ -122,7 +122,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
break;
}
parsed->frame_type = parsed_payload.frame_type;
parsed->frame_type = parsed_payload.video_header().frame_type;
return true;
}

View File

@ -231,16 +231,16 @@ int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
size_t payload_size,
const RTPHeader& rtp_header,
const RTPVideoHeader& video_header,
VideoFrameType frame_type,
const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
bool is_recovered) {
VCMPacket packet(payload_data, payload_size, rtp_header, video_header,
frame_type, ntp_estimator_.Estimate(rtp_header.timestamp));
ntp_estimator_.Estimate(rtp_header.timestamp));
packet.generic_descriptor = generic_descriptor;
if (nack_module_) {
const bool is_keyframe = video_header.is_first_packet_in_frame &&
frame_type == VideoFrameType::kVideoFrameKey;
const bool is_keyframe =
video_header.is_first_packet_in_frame &&
video_header.frame_type == VideoFrameType::kVideoFrameKey;
packet.timesNacked = nack_module_->OnReceivedPacket(
rtp_header.sequenceNumber, is_keyframe, is_recovered);
@ -556,7 +556,7 @@ void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
if (video_header.is_last_packet_in_frame) {
video_header.color_space = packet.GetExtension<ColorSpaceExtension>();
if (video_header.color_space ||
parsed_payload.frame_type == VideoFrameType::kVideoFrameKey) {
video_header.frame_type == VideoFrameType::kVideoFrameKey) {
// Store color space since it's only transmitted when changed or for key
// frames. Color space will be cleared if a key frame is transmitted
// without color space information.
@ -594,7 +594,7 @@ void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
rtp_header.markerBit || generic_descriptor_wire->LastPacketInSubFrame();
if (generic_descriptor_wire->FirstPacketInSubFrame()) {
parsed_payload.frame_type =
video_header.frame_type =
generic_descriptor_wire->FrameDependenciesDiffs().empty()
? VideoFrameType::kVideoFrameKey
: VideoFrameType::kVideoFrameDelta;
@ -607,8 +607,8 @@ void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
}
OnReceivedPayloadData(parsed_payload.payload, parsed_payload.payload_length,
rtp_header, video_header, parsed_payload.frame_type,
generic_descriptor_wire, packet.recovered());
rtp_header, video_header, generic_descriptor_wire,
packet.recovered());
}
void RtpVideoStreamReceiver::ParseAndHandleEncapsulatingHeader(

View File

@ -108,7 +108,6 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
size_t payload_size,
const RTPHeader& rtp_header,
const RTPVideoHeader& video_header,
VideoFrameType frame_type,
const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
bool is_recovered);

View File

@ -335,12 +335,12 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(
data.data(), data.size(), rtp_header, video_header,
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
}
TEST_F(RtpVideoStreamReceiverTest, NoInfiniteRecursionOnEncapsulatedRedPacket) {
@ -393,14 +393,14 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
constexpr uint8_t expected_bitsteam[] = {1, 2, 3, 0xff};
mock_on_complete_frame_callback_.AppendExpectedBitstream(
expected_bitsteam, sizeof(expected_bitsteam));
EXPECT_CALL(mock_on_complete_frame_callback_,
DoOnCompleteFrameFailBitstream(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(
data.data(), data.size(), rtp_header, video_header,
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
}
class RtpVideoStreamReceiverTestH264
@ -421,26 +421,28 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
AddSps(&sps_video_header, 0, &sps_data);
rtp_header.sequenceNumber = 0;
sps_video_header.is_first_packet_in_frame = true;
sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
sps_data.size());
rtp_video_stream_receiver_->OnReceivedPayloadData(
sps_data.data(), sps_data.size(), rtp_header, sps_video_header,
VideoFrameType::kEmptyFrame, absl::nullopt, false);
absl::nullopt, false);
std::vector<uint8_t> pps_data;
RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
AddPps(&pps_video_header, 0, 1, &pps_data);
rtp_header.sequenceNumber = 1;
pps_video_header.is_first_packet_in_frame = true;
pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
pps_data.size());
rtp_video_stream_receiver_->OnReceivedPayloadData(
pps_data.data(), pps_data.size(), rtp_header, pps_video_header,
VideoFrameType::kEmptyFrame, absl::nullopt, false);
absl::nullopt, false);
std::vector<uint8_t> idr_data;
RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
@ -448,6 +450,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
rtp_header.sequenceNumber = 2;
idr_video_header.is_first_packet_in_frame = true;
idr_video_header.is_last_packet_in_frame = true;
idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
idr_data.insert(idr_data.end(), {0x65, 1, 2, 3});
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
@ -456,7 +459,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(
idr_data.data(), idr_data.size(), rtp_header, idr_video_header,
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
absl::nullopt, false);
}
TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
@ -490,6 +493,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecH264;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
data.insert(data.end(), {1, 2, 3});
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
@ -497,8 +501,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(
data.data(), data.size(), rtp_header, video_header,
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
}
TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
@ -511,35 +514,32 @@ TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_video_stream_receiver_->OnReceivedPayloadData(
data.data(), data.size(), rtp_header, video_header,
VideoFrameType::kVideoFrameKey, absl::nullopt, false);
data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
rtp_header.sequenceNumber = 3;
rtp_video_stream_receiver_->OnReceivedPayloadData(
nullptr, 0, rtp_header, video_header, VideoFrameType::kVideoFrameKey,
absl::nullopt, false);
nullptr, 0, rtp_header, video_header, absl::nullopt, false);
rtp_header.sequenceNumber = 4;
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
rtp_video_stream_receiver_->OnReceivedPayloadData(
data.data(), data.size(), rtp_header, video_header,
VideoFrameType::kVideoFrameDelta, absl::nullopt, false);
data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
rtp_header.sequenceNumber = 6;
rtp_video_stream_receiver_->OnReceivedPayloadData(
data.data(), data.size(), rtp_header, video_header,
VideoFrameType::kVideoFrameDelta, absl::nullopt, false);
data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
rtp_header.sequenceNumber = 5;
rtp_video_stream_receiver_->OnReceivedPayloadData(
nullptr, 0, rtp_header, video_header, VideoFrameType::kVideoFrameDelta,
absl::nullopt, false);
nullptr, 0, rtp_header, video_header, absl::nullopt, false);
}
TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
@ -550,11 +550,10 @@ TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
rtp_video_stream_receiver_->OnReceivedPayloadData(
data.data(), data.size(), rtp_header, video_header,
VideoFrameType::kVideoFrameDelta, absl::nullopt, false);
data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
}
TEST_F(RtpVideoStreamReceiverTest, SecondarySinksGetRtpNotifications) {