Add ParsedPayload::video_header() accessor.
Preparation CL to remove RTPTypeHeader.

Bug: none
Change-Id: I695acf20082b94744a2f6c7692f1b2128932cd79
Reviewed-on: https://webrtc-review.googlesource.com/86132
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23835}
commit cb96ad8f0e
parent d92288f5ba
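For orientation before reading the diff: the new accessor is a thin forwarding method, so every call-site change below is mechanical (parsed_payload->type.Video.X becomes parsed_payload->video_header().X). A minimal, self-contained sketch of the pattern follows; the stub types are placeholders standing in for the real WebRTC definitions and are not part of this CL.

```cpp
#include <cstddef>
#include <cstdint>

// Stubs for illustration only; the real RTPVideoHeader / RTPTypeHeader carry
// many more fields and live in the rtp_rtcp module headers.
struct RTPVideoHeader { int codec = 0; };
struct RTPTypeHeader { RTPVideoHeader Video; };
enum FrameType { kEmptyFrame, kVideoFrameKey, kVideoFrameDelta };

struct ParsedPayload {
  const uint8_t* payload = nullptr;
  size_t payload_length = 0;
  FrameType frame_type = kEmptyFrame;

  // Accessors added by this CL: they simply forward to type.Video, so callers
  // stop depending on RTPTypeHeader directly.
  RTPVideoHeader& video_header() { return type.Video; }
  const RTPVideoHeader& video_header() const { return type.Video; }

  RTPTypeHeader type;  // Slated for removal once all callers are migrated.
};

int main() {
  ParsedPayload parsed;
  // Before this CL: parsed.type.Video.codec = ...;
  // After this CL:
  parsed.video_header().codec = 1;
  return parsed.video_header().codec == parsed.type.Video.codec ? 0 : 1;
}
```

The hunks below then apply this rename across the RTP depacketizers, the video receiver, the video_coding packet and jitter buffer code, and their tests.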
@@ -55,6 +55,8 @@ class RtpDepacketizer {
     const uint8_t* payload;
     size_t payload_length;
     FrameType frame_type;
+    RTPVideoHeader& video_header() { return type.Video; }
+    const RTPVideoHeader& video_header() const { return type.Video; }
     RTPTypeHeader type;
   };
 
@@ -429,7 +429,7 @@ bool RtpDepacketizerH264::Parse(ParsedPayload* parsed_payload,
   modified_buffer_.reset();
 
   uint8_t nal_type = payload_data[0] & kTypeMask;
-  parsed_payload->type.Video.codecHeader.H264.nalus_length = 0;
+  parsed_payload->video_header().codecHeader.H264.nalus_length = 0;
   if (nal_type == H264::NaluType::kFuA) {
     // Fragmented NAL units (FU-A).
     if (!ParseFuaNalu(parsed_payload, payload_data))
@@ -453,13 +453,13 @@ bool RtpDepacketizerH264::Parse(ParsedPayload* parsed_payload,
 bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
     ParsedPayload* parsed_payload,
     const uint8_t* payload_data) {
-  parsed_payload->type.Video.width = 0;
-  parsed_payload->type.Video.height = 0;
-  parsed_payload->type.Video.codec = kVideoCodecH264;
-  parsed_payload->type.Video.simulcastIdx = 0;
-  parsed_payload->type.Video.is_first_packet_in_frame = true;
+  parsed_payload->video_header().width = 0;
+  parsed_payload->video_header().height = 0;
+  parsed_payload->video_header().codec = kVideoCodecH264;
+  parsed_payload->video_header().simulcastIdx = 0;
+  parsed_payload->video_header().is_first_packet_in_frame = true;
   RTPVideoHeaderH264* h264_header =
-      &parsed_payload->type.Video.codecHeader.H264;
+      &parsed_payload->video_header().codecHeader.H264;
 
   const uint8_t* nalu_start = payload_data + kNalHeaderSize;
   const size_t nalu_length = length_ - kNalHeaderSize;
@@ -569,8 +569,8 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
         }
 
         if (sps) {
-          parsed_payload->type.Video.width = sps->width;
-          parsed_payload->type.Video.height = sps->height;
+          parsed_payload->video_header().width = sps->width;
+          parsed_payload->video_header().height = sps->height;
           nalu.sps_id = sps->id;
         } else {
           RTC_LOG(LS_WARNING) << "Failed to parse SPS id from SPS slice.";
@@ -618,7 +618,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
     RTC_LOG(LS_WARNING) << "Unexpected STAP-A or FU-A received.";
     return false;
   }
-  RTPVideoHeaderH264* h264 = &parsed_payload->type.Video.codecHeader.H264;
+  RTPVideoHeaderH264* h264 = &parsed_payload->video_header().codecHeader.H264;
   if (h264->nalus_length == kMaxNalusPerPacket) {
     RTC_LOG(LS_WARNING)
         << "Received packet containing more than " << kMaxNalusPerPacket
@@ -672,12 +672,12 @@ bool RtpDepacketizerH264::ParseFuaNalu(
   } else {
     parsed_payload->frame_type = kVideoFrameDelta;
   }
-  parsed_payload->type.Video.width = 0;
-  parsed_payload->type.Video.height = 0;
-  parsed_payload->type.Video.codec = kVideoCodecH264;
-  parsed_payload->type.Video.simulcastIdx = 0;
-  parsed_payload->type.Video.is_first_packet_in_frame = first_fragment;
-  RTPVideoHeaderH264* h264 = &parsed_payload->type.Video.codecHeader.H264;
+  parsed_payload->video_header().width = 0;
+  parsed_payload->video_header().height = 0;
+  parsed_payload->video_header().codec = kVideoCodecH264;
+  parsed_payload->video_header().simulcastIdx = 0;
+  parsed_payload->video_header().is_first_packet_in_frame = first_fragment;
+  RTPVideoHeaderH264* h264 = &parsed_payload->video_header().codecHeader.H264;
   h264->packetization_type = kH264FuA;
   h264->nalu_type = original_nal_type;
   if (first_fragment) {
@@ -600,11 +600,11 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   EXPECT_EQ(kH264SingleNalu,
-            payload.type.Video.codecHeader.H264.packetization_type);
-  EXPECT_EQ(kIdr, payload.type.Video.codecHeader.H264.nalu_type);
+            payload.video_header().codecHeader.H264.packetization_type);
+  EXPECT_EQ(kIdr, payload.video_header().codecHeader.H264.nalu_type);
 }
 
 TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
@@ -616,12 +616,12 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
   EXPECT_EQ(kH264SingleNalu,
-            payload.type.Video.codecHeader.H264.packetization_type);
-  EXPECT_EQ(1280u, payload.type.Video.width);
-  EXPECT_EQ(720u, payload.type.Video.height);
+            payload.video_header().codecHeader.H264.packetization_type);
+  EXPECT_EQ(1280u, payload.video_header().width);
+  EXPECT_EQ(720u, payload.video_header().height);
 }
 
 TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
@@ -646,9 +646,9 @@ TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
-  const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+  const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
   EXPECT_EQ(kH264StapA, h264.packetization_type);
   // NALU type for aggregated packets is the type of the first packet only.
   EXPECT_EQ(kSps, h264.nalu_type);
@@ -677,11 +677,12 @@ TEST_F(RtpDepacketizerH264Test, TestStapANaluSpsWithResolution) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
-  EXPECT_EQ(kH264StapA, payload.type.Video.codecHeader.H264.packetization_type);
-  EXPECT_EQ(1280u, payload.type.Video.width);
-  EXPECT_EQ(720u, payload.type.Video.height);
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+  EXPECT_EQ(kH264StapA,
+            payload.video_header().codecHeader.H264.packetization_type);
+  EXPECT_EQ(1280u, payload.video_header().width);
+  EXPECT_EQ(720u, payload.video_header().height);
 }
 
 TEST_F(RtpDepacketizerH264Test, TestEmptyStapARejected) {
@@ -804,11 +805,12 @@ TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet, sizeof(packet));
   EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
-  EXPECT_EQ(kH264StapA, payload.type.Video.codecHeader.H264.packetization_type);
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+  EXPECT_EQ(kH264StapA,
+            payload.video_header().codecHeader.H264.packetization_type);
   // NALU type for aggregated packets is the type of the first packet only.
-  EXPECT_EQ(kSlice, payload.type.Video.codecHeader.H264.nalu_type);
+  EXPECT_EQ(kSlice, payload.video_header().codecHeader.H264.nalu_type);
 }
 
 TEST_F(RtpDepacketizerH264Test, TestFuA) {
@@ -843,9 +845,9 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet1, sizeof(packet1)));
   ExpectPacket(&payload, kExpected1, sizeof(kExpected1));
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_TRUE(payload.type.Video.is_first_packet_in_frame);
-  const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
+  const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
   EXPECT_EQ(kH264FuA, h264.packetization_type);
   EXPECT_EQ(kIdr, h264.nalu_type);
   ASSERT_EQ(1u, h264.nalus_length);
@@ -859,10 +861,10 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet2, sizeof(packet2)));
   ExpectPacket(&payload, kExpected2, sizeof(kExpected2));
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_FALSE(payload.type.Video.is_first_packet_in_frame);
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
   {
-    const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+    const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
     EXPECT_EQ(kH264FuA, h264.packetization_type);
     EXPECT_EQ(kIdr, h264.nalu_type);
     // NALU info is only expected for the first FU-A packet.
@@ -873,10 +875,10 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet3, sizeof(packet3)));
   ExpectPacket(&payload, kExpected3, sizeof(kExpected3));
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecH264, payload.type.Video.codec);
-  EXPECT_FALSE(payload.type.Video.is_first_packet_in_frame);
+  EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
+  EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
   {
-    const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+    const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
     EXPECT_EQ(kH264FuA, h264.packetization_type);
     EXPECT_EQ(kIdr, h264.nalu_type);
     // NALU info is only expected for the first FU-A packet.
@@ -928,7 +930,7 @@ TEST_F(RtpDepacketizerH264Test, TestSeiPacket) {
   };
   RtpDepacketizer::ParsedPayload payload;
   ASSERT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
-  const RTPVideoHeaderH264& h264 = payload.type.Video.codecHeader.H264;
+  const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
   EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
   EXPECT_EQ(kH264SingleNalu, h264.packetization_type);
   EXPECT_EQ(kSei, h264.nalu_type);
@@ -129,11 +129,11 @@ bool RtpDepacketizerGeneric::Parse(ParsedPayload* parsed_payload,
       ((generic_header & RtpFormatVideoGeneric::kKeyFrameBit) != 0)
           ? kVideoFrameKey
          : kVideoFrameDelta;
-  parsed_payload->type.Video.is_first_packet_in_frame =
+  parsed_payload->video_header().is_first_packet_in_frame =
       (generic_header & RtpFormatVideoGeneric::kFirstPacketBit) != 0;
-  parsed_payload->type.Video.codec = kVideoCodecGeneric;
-  parsed_payload->type.Video.width = 0;
-  parsed_payload->type.Video.height = 0;
+  parsed_payload->video_header().codec = kVideoCodecGeneric;
+  parsed_payload->video_header().width = 0;
+  parsed_payload->video_header().height = 0;
 
   parsed_payload->payload = payload_data;
   parsed_payload->payload_length = payload_data_length;
@@ -129,8 +129,8 @@ int ParseVP8FrameSize(RtpDepacketizer::ParsedPayload* parsed_payload,
     // in the beginning of the partition.
     return -1;
   }
-  parsed_payload->type.Video.width = ((data[7] << 8) + data[6]) & 0x3FFF;
-  parsed_payload->type.Video.height = ((data[9] << 8) + data[8]) & 0x3FFF;
+  parsed_payload->video_header().width = ((data[7] << 8) + data[6]) & 0x3FFF;
+  parsed_payload->video_header().height = ((data[9] << 8) + data[8]) & 0x3FFF;
   return 0;
 }
 
@@ -480,22 +480,22 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
   bool beginning_of_partition = (*payload_data & 0x10) ? true : false;  // S bit
   int partition_id = (*payload_data & 0x0F);  // PartID field
 
-  parsed_payload->type.Video.width = 0;
-  parsed_payload->type.Video.height = 0;
-  parsed_payload->type.Video.is_first_packet_in_frame =
+  parsed_payload->video_header().width = 0;
+  parsed_payload->video_header().height = 0;
+  parsed_payload->video_header().is_first_packet_in_frame =
       beginning_of_partition && (partition_id == 0);
-  parsed_payload->type.Video.simulcastIdx = 0;
-  parsed_payload->type.Video.codec = kVideoCodecVP8;
-  parsed_payload->type.Video.codecHeader.VP8.nonReference =
+  parsed_payload->video_header().simulcastIdx = 0;
+  parsed_payload->video_header().codec = kVideoCodecVP8;
+  parsed_payload->video_header().codecHeader.VP8.nonReference =
       (*payload_data & 0x20) ? true : false;  // N bit
-  parsed_payload->type.Video.codecHeader.VP8.partitionId = partition_id;
-  parsed_payload->type.Video.codecHeader.VP8.beginningOfPartition =
+  parsed_payload->video_header().codecHeader.VP8.partitionId = partition_id;
+  parsed_payload->video_header().codecHeader.VP8.beginningOfPartition =
       beginning_of_partition;
-  parsed_payload->type.Video.codecHeader.VP8.pictureId = kNoPictureId;
-  parsed_payload->type.Video.codecHeader.VP8.tl0PicIdx = kNoTl0PicIdx;
-  parsed_payload->type.Video.codecHeader.VP8.temporalIdx = kNoTemporalIdx;
-  parsed_payload->type.Video.codecHeader.VP8.layerSync = false;
-  parsed_payload->type.Video.codecHeader.VP8.keyIdx = kNoKeyIdx;
+  parsed_payload->video_header().codecHeader.VP8.pictureId = kNoPictureId;
+  parsed_payload->video_header().codecHeader.VP8.tl0PicIdx = kNoTl0PicIdx;
+  parsed_payload->video_header().codecHeader.VP8.temporalIdx = kNoTemporalIdx;
+  parsed_payload->video_header().codecHeader.VP8.layerSync = false;
+  parsed_payload->video_header().codecHeader.VP8.keyIdx = kNoKeyIdx;
 
   if (partition_id > 8) {
     // Weak check for corrupt payload_data: PartID MUST NOT be larger than 8.
@@ -512,7 +512,7 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
 
   if (extension) {
     const int parsed_bytes =
-        ParseVP8Extension(&parsed_payload->type.Video.codecHeader.VP8,
+        ParseVP8Extension(&parsed_payload->video_header().codecHeader.VP8,
                           payload_data, payload_data_length);
     if (parsed_bytes < 0)
       return false;
@@ -59,23 +59,23 @@ constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
 // |           padding            |
 // :                              :
 // +-+-+-+-+-+-+-+-+
-void VerifyBasicHeader(RTPTypeHeader* type, bool N, bool S, int part_id) {
-  ASSERT_TRUE(type != NULL);
-  EXPECT_EQ(N, type->Video.codecHeader.VP8.nonReference);
-  EXPECT_EQ(S, type->Video.codecHeader.VP8.beginningOfPartition);
-  EXPECT_EQ(part_id, type->Video.codecHeader.VP8.partitionId);
+void VerifyBasicHeader(RTPVideoHeader* header, bool N, bool S, int part_id) {
+  ASSERT_TRUE(header != NULL);
+  EXPECT_EQ(N, header->codecHeader.VP8.nonReference);
+  EXPECT_EQ(S, header->codecHeader.VP8.beginningOfPartition);
+  EXPECT_EQ(part_id, header->codecHeader.VP8.partitionId);
 }
 
-void VerifyExtensions(RTPTypeHeader* type,
+void VerifyExtensions(RTPVideoHeader* header,
                       int16_t picture_id,   /* I */
                       int16_t tl0_pic_idx,  /* L */
                       uint8_t temporal_idx, /* T */
                       int key_idx /* K */) {
-  ASSERT_TRUE(type != NULL);
-  EXPECT_EQ(picture_id, type->Video.codecHeader.VP8.pictureId);
-  EXPECT_EQ(tl0_pic_idx, type->Video.codecHeader.VP8.tl0PicIdx);
-  EXPECT_EQ(temporal_idx, type->Video.codecHeader.VP8.temporalIdx);
-  EXPECT_EQ(key_idx, type->Video.codecHeader.VP8.keyIdx);
+  ASSERT_TRUE(header != NULL);
+  EXPECT_EQ(picture_id, header->codecHeader.VP8.pictureId);
+  EXPECT_EQ(tl0_pic_idx, header->codecHeader.VP8.tl0PicIdx);
+  EXPECT_EQ(temporal_idx, header->codecHeader.VP8.temporalIdx);
+  EXPECT_EQ(key_idx, header->codecHeader.VP8.keyIdx);
 }
 }  // namespace
 
@@ -299,10 +299,10 @@ TEST_F(RtpDepacketizerVp8Test, BasicHeader) {
                sizeof(packet) - kHeaderLength);
 
   EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
-  EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
-  VerifyBasicHeader(&payload.type, 0, 1, 4);
-  VerifyExtensions(&payload.type, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx,
-                   kNoKeyIdx);
+  EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+  VerifyBasicHeader(&payload.video_header(), 0, 1, 4);
+  VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
+                   kNoTemporalIdx, kNoKeyIdx);
 }
 
 TEST_F(RtpDepacketizerVp8Test, PictureID) {
@@ -319,10 +319,10 @@ TEST_F(RtpDepacketizerVp8Test, PictureID) {
   ExpectPacket(&payload, packet + kHeaderLength1,
                sizeof(packet) - kHeaderLength1);
   EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
-  EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
-  VerifyBasicHeader(&payload.type, 1, 0, 0);
-  VerifyExtensions(&payload.type, kPictureId, kNoTl0PicIdx, kNoTemporalIdx,
-                   kNoKeyIdx);
+  EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+  VerifyBasicHeader(&payload.video_header(), 1, 0, 0);
+  VerifyExtensions(&payload.video_header(), kPictureId, kNoTl0PicIdx,
+                   kNoTemporalIdx, kNoKeyIdx);
 
   // Re-use packet, but change to long PictureID.
   packet[2] = 0x80 | kPictureId;
@@ -332,9 +332,9 @@ TEST_F(RtpDepacketizerVp8Test, PictureID) {
   ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
   ExpectPacket(&payload, packet + kHeaderLength2,
                sizeof(packet) - kHeaderLength2);
-  VerifyBasicHeader(&payload.type, 1, 0, 0);
-  VerifyExtensions(&payload.type, (kPictureId << 8) + kPictureId, kNoTl0PicIdx,
-                   kNoTemporalIdx, kNoKeyIdx);
+  VerifyBasicHeader(&payload.video_header(), 1, 0, 0);
+  VerifyExtensions(&payload.video_header(), (kPictureId << 8) + kPictureId,
+                   kNoTl0PicIdx, kNoTemporalIdx, kNoKeyIdx);
 }
 
 TEST_F(RtpDepacketizerVp8Test, Tl0PicIdx) {
@@ -350,10 +350,10 @@ TEST_F(RtpDepacketizerVp8Test, Tl0PicIdx) {
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
-  VerifyBasicHeader(&payload.type, 0, 1, 0);
-  VerifyExtensions(&payload.type, kNoPictureId, kTl0PicIdx, kNoTemporalIdx,
-                   kNoKeyIdx);
+  EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+  VerifyBasicHeader(&payload.video_header(), 0, 1, 0);
+  VerifyExtensions(&payload.video_header(), kNoPictureId, kTl0PicIdx,
+                   kNoTemporalIdx, kNoKeyIdx);
 }
 
 TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
@@ -368,10 +368,11 @@ TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
   EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
-  EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
-  VerifyBasicHeader(&payload.type, 0, 0, 8);
-  VerifyExtensions(&payload.type, kNoPictureId, kNoTl0PicIdx, 2, kNoKeyIdx);
-  EXPECT_FALSE(payload.type.Video.codecHeader.VP8.layerSync);
+  EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+  VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
+  VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx, 2,
+                   kNoKeyIdx);
+  EXPECT_FALSE(payload.video_header().codecHeader.VP8.layerSync);
 }
 
 TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
@@ -387,10 +388,10 @@ TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
   EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
-  EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
-  VerifyBasicHeader(&payload.type, 0, 0, 8);
-  VerifyExtensions(&payload.type, kNoPictureId, kNoTl0PicIdx, kNoTemporalIdx,
-                   kKeyIdx);
+  EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+  VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
+  VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx,
+                   kNoTemporalIdx, kKeyIdx);
 }
 
 TEST_F(RtpDepacketizerVp8Test, MultipleExtensions) {
@@ -408,9 +409,9 @@ TEST_F(RtpDepacketizerVp8Test, MultipleExtensions) {
   ExpectPacket(&payload, packet + kHeaderLength,
                sizeof(packet) - kHeaderLength);
   EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
-  EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
-  VerifyBasicHeader(&payload.type, 0, 0, 8);
-  VerifyExtensions(&payload.type, (17 << 8) + 17, 42, 1, 17);
+  EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+  VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
+  VerifyExtensions(&payload.video_header(), (17 << 8) + 17, 42, 1, 17);
 }
 
 TEST_F(RtpDepacketizerVp8Test, TooShortHeader) {
@@ -447,12 +448,12 @@ TEST_F(RtpDepacketizerVp8Test, TestWithPacketizer) {
   auto vp8_payload = rtp_payload.subview(kHeaderLength);
   ExpectPacket(&payload, vp8_payload.data(), vp8_payload.size());
   EXPECT_EQ(kVideoFrameKey, payload.frame_type);
-  EXPECT_EQ(kVideoCodecVP8, payload.type.Video.codec);
-  VerifyBasicHeader(&payload.type, 1, 1, 0);
-  VerifyExtensions(&payload.type, input_header.pictureId,
+  EXPECT_EQ(kVideoCodecVP8, payload.video_header().codec);
+  VerifyBasicHeader(&payload.video_header(), 1, 1, 0);
+  VerifyExtensions(&payload.video_header(), input_header.pictureId,
                    input_header.tl0PicIdx, input_header.temporalIdx,
                    input_header.keyIdx);
-  EXPECT_EQ(payload.type.Video.codecHeader.VP8.layerSync,
+  EXPECT_EQ(payload.video_header().codecHeader.VP8.layerSync,
             input_header.layerSync);
 }
 
@@ -712,14 +712,14 @@ bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
   RETURN_FALSE_ON_ERROR(parser.ReadBits(&z_bit, 1));
 
   // Parsed payload.
-  parsed_payload->type.Video.width = 0;
-  parsed_payload->type.Video.height = 0;
-  parsed_payload->type.Video.simulcastIdx = 0;
-  parsed_payload->type.Video.codec = kVideoCodecVP9;
+  parsed_payload->video_header().width = 0;
+  parsed_payload->video_header().height = 0;
+  parsed_payload->video_header().simulcastIdx = 0;
+  parsed_payload->video_header().codec = kVideoCodecVP9;
 
   parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey;
 
-  RTPVideoHeaderVP9* vp9 = &parsed_payload->type.Video.codecHeader.VP9;
+  RTPVideoHeaderVP9* vp9 = &parsed_payload->video_header().codecHeader.VP9;
   vp9->InitRTPVideoHeaderVP9();
   vp9->inter_pic_predicted = p_bit ? true : false;
   vp9->flexible_mode = f_bit ? true : false;
@@ -748,11 +748,11 @@ bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
     }
     if (vp9->spatial_layer_resolution_present) {
       // TODO(asapersson): Add support for spatial layers.
-      parsed_payload->type.Video.width = vp9->width[0];
-      parsed_payload->type.Video.height = vp9->height[0];
+      parsed_payload->video_header().width = vp9->width[0];
+      parsed_payload->video_header().height = vp9->height[0];
     }
   }
-  parsed_payload->type.Video.is_first_packet_in_frame =
+  parsed_payload->video_header().is_first_packet_in_frame =
       b_bit && (!l_bit || !vp9->inter_layer_predicted);
 
   uint64_t rem_bits = parser.RemainingBitCount();
@@ -82,8 +82,8 @@ void ParseAndCheckPacket(const uint8_t* packet,
   std::unique_ptr<RtpDepacketizer> depacketizer(new RtpDepacketizerVp9());
   RtpDepacketizer::ParsedPayload parsed;
   ASSERT_TRUE(depacketizer->Parse(&parsed, packet, expected_length));
-  EXPECT_EQ(kVideoCodecVP9, parsed.type.Video.codec);
-  VerifyHeader(expected, parsed.type.Video.codecHeader.VP9);
+  EXPECT_EQ(kVideoCodecVP9, parsed.video_header().codec);
+  VerifyHeader(expected, parsed.video_header().codecHeader.VP9);
   const size_t kExpectedPayloadLength = expected_length - expected_hdr_length;
   VerifyPayload(parsed, packet + expected_hdr_length, kExpectedPayloadLength);
 }
@@ -760,7 +760,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseFirstPacketInKeyFrame) {
   RtpDepacketizer::ParsedPayload parsed;
   ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
   EXPECT_EQ(kVideoFrameKey, parsed.frame_type);
-  EXPECT_TRUE(parsed.type.Video.is_first_packet_in_frame);
+  EXPECT_TRUE(parsed.video_header().is_first_packet_in_frame);
 }
 
 TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
@@ -770,7 +770,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
   RtpDepacketizer::ParsedPayload parsed;
   ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
   EXPECT_EQ(kVideoFrameDelta, parsed.frame_type);
-  EXPECT_FALSE(parsed.type.Video.is_first_packet_in_frame);
+  EXPECT_FALSE(parsed.video_header().is_first_packet_in_frame);
 }
 
 TEST_F(RtpDepacketizerVp9Test, ParseResolution) {
@@ -790,8 +790,8 @@ TEST_F(RtpDepacketizerVp9Test, ParseResolution) {
 
   RtpDepacketizer::ParsedPayload parsed;
   ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
-  EXPECT_EQ(kWidth[0], parsed.type.Video.width);
-  EXPECT_EQ(kHeight[0], parsed.type.Video.height);
+  EXPECT_EQ(kWidth[0], parsed.video_header().width);
+  EXPECT_EQ(kHeight[0], parsed.video_header().height);
 }
 
 TEST_F(RtpDepacketizerVp9Test, ParseFailsForNoPayloadLength) {
@@ -48,7 +48,7 @@ int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
                                          const uint8_t* payload,
                                          size_t payload_length,
                                          int64_t timestamp_ms) {
-  rtp_header->type.Video.codec =
+  rtp_header->video_header().codec =
       specific_payload.video_payload().videoCodecType;
 
   RTC_DCHECK_GE(payload_length, rtp_header->header.paddingLength);
@@ -66,7 +66,7 @@ int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
 
   // We are not allowed to hold a critical section when calling below functions.
   std::unique_ptr<RtpDepacketizer> depacketizer(
-      RtpDepacketizer::Create(rtp_header->type.Video.codec));
+      RtpDepacketizer::Create(rtp_header->video_header().codec));
   if (depacketizer.get() == NULL) {
     RTC_LOG(LS_ERROR) << "Failed to create depacketizer.";
     return -1;
@@ -77,28 +77,28 @@ int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
     return -1;
 
   rtp_header->frameType = parsed_payload.frame_type;
-  rtp_header->type = parsed_payload.type;
-  rtp_header->type.Video.rotation = kVideoRotation_0;
-  rtp_header->type.Video.content_type = VideoContentType::UNSPECIFIED;
-  rtp_header->type.Video.video_timing.flags = VideoSendTiming::kInvalid;
+  rtp_header->video_header() = parsed_payload.video_header();
+  rtp_header->video_header().rotation = kVideoRotation_0;
+  rtp_header->video_header().content_type = VideoContentType::UNSPECIFIED;
+  rtp_header->video_header().video_timing.flags = VideoSendTiming::kInvalid;
 
   // Retrieve the video rotation information.
   if (rtp_header->header.extension.hasVideoRotation) {
-    rtp_header->type.Video.rotation =
+    rtp_header->video_header().rotation =
         rtp_header->header.extension.videoRotation;
   }
 
   if (rtp_header->header.extension.hasVideoContentType) {
-    rtp_header->type.Video.content_type =
+    rtp_header->video_header().content_type =
         rtp_header->header.extension.videoContentType;
   }
 
   if (rtp_header->header.extension.has_video_timing) {
-    rtp_header->type.Video.video_timing =
+    rtp_header->video_header().video_timing =
         rtp_header->header.extension.video_timing;
   }
 
-  rtp_header->type.Video.playout_delay =
+  rtp_header->video_header().playout_delay =
       rtp_header->header.extension.playout_delay;
 
   return data_callback_->OnReceivedPayloadData(parsed_payload.payload,
@@ -242,7 +242,7 @@ class TestBasicJitterBuffer : public ::testing::TestWithParam<std::string>,
     rtpHeader.header.timestamp = timestamp_;
     rtpHeader.header.markerBit = true;
     rtpHeader.frameType = kVideoFrameDelta;
-    rtpHeader.type.Video.codec = kVideoCodecUnknown;
+    rtpHeader.video_header().codec = kVideoCodecUnknown;
     packet_.reset(new VCMPacket(data_, size_, rtpHeader));
   }
 
@@ -800,7 +800,7 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
   rtpHeader.header.sequenceNumber = seq_num_ + 2;
   rtpHeader.header.timestamp = timestamp_ + (33 * 90);
   rtpHeader.header.markerBit = false;
-  rtpHeader.type.Video.codec = kVideoCodecUnknown;
+  rtpHeader.video_header().codec = kVideoCodecUnknown;
   VCMPacket empty_packet(data_, 0, rtpHeader);
   EXPECT_EQ(kOldPacket,
             jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
@@ -2164,7 +2164,7 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
   timestamp_ += 33 * 90;
   WebRtcRTPHeader rtpHeader;
   memset(&rtpHeader, 0, sizeof(rtpHeader));
-  rtpHeader.type.Video.codec = kVideoCodecUnknown;
+  rtpHeader.video_header().codec = kVideoCodecUnknown;
   VCMPacket emptypacket(data_, 0, rtpHeader);
   emptypacket.seqNum = seq_num_;
   emptypacket.timestamp = timestamp_;
@@ -50,20 +50,21 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
       timesNacked(-1),
       frameType(rtpHeader.frameType),
       codec(kVideoCodecUnknown),
-      is_first_packet_in_frame(rtpHeader.type.Video.is_first_packet_in_frame),
+      is_first_packet_in_frame(
+          rtpHeader.video_header().is_first_packet_in_frame),
       completeNALU(kNaluComplete),
       insertStartCode(false),
-      width(rtpHeader.type.Video.width),
-      height(rtpHeader.type.Video.height),
-      video_header(rtpHeader.type.Video) {
-  CopyCodecSpecifics(rtpHeader.type.Video);
+      width(rtpHeader.video_header().width),
+      height(rtpHeader.video_header().height),
+      video_header(rtpHeader.video_header()) {
+  CopyCodecSpecifics(rtpHeader.video_header());
 
   if (markerBit) {
-    video_header.rotation = rtpHeader.type.Video.rotation;
+    video_header.rotation = rtpHeader.video_header().rotation;
   }
   // Playout decisions are made entirely based on first packet in a frame.
   if (is_first_packet_in_frame) {
-    video_header.playout_delay = rtpHeader.type.Video.playout_delay;
+    video_header.playout_delay = rtpHeader.video_header().playout_delay;
   } else {
     video_header.playout_delay = {-1, -1};
   }
@@ -106,7 +106,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
   header.header.payloadType = kUnusedPayloadType;
   header.header.ssrc = 1;
   header.header.headerLength = 12;
-  header.type.Video.codec = kVideoCodecVP8;
+  header.video_header().codec = kVideoCodecVP8;
   for (int i = 0; i < 10; ++i) {
     EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
     InsertAndVerifyPaddingFrame(payload, &header);
@@ -130,17 +130,17 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
   header.header.payloadType = kUnusedPayloadType;
   header.header.ssrc = 1;
   header.header.headerLength = 12;
-  header.type.Video.codec = kVideoCodecVP8;
+  header.video_header().codec = kVideoCodecVP8;
   // Insert one video frame to get one frame decoded.
   header.frameType = kVideoFrameKey;
-  header.type.Video.is_first_packet_in_frame = true;
+  header.video_header().is_first_packet_in_frame = true;
   header.header.markerBit = true;
   InsertAndVerifyDecodableFrame(payload, kFrameSize, &header);
   clock_.AdvanceTimeMilliseconds(33);
   header.header.timestamp += 3000;
 
   header.frameType = kEmptyFrame;
-  header.type.Video.is_first_packet_in_frame = false;
+  header.video_header().is_first_packet_in_frame = false;
   header.header.markerBit = false;
   // Insert padding frames.
   for (int i = 0; i < 10; ++i) {
@@ -176,15 +176,15 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
   WebRtcRTPHeader header;
   memset(&header, 0, sizeof(header));
   header.frameType = kEmptyFrame;
-  header.type.Video.is_first_packet_in_frame = false;
+  header.video_header().is_first_packet_in_frame = false;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
   header.header.payloadType = kUnusedPayloadType;
   header.header.ssrc = 1;
   header.header.headerLength = 12;
-  header.type.Video.codec = kVideoCodecVP8;
-  header.type.Video.codecHeader.VP8.pictureId = -1;
-  header.type.Video.codecHeader.VP8.tl0PicIdx = -1;
+  header.video_header().codec = kVideoCodecVP8;
+  header.video_header().codecHeader.VP8.pictureId = -1;
+  header.video_header().codecHeader.VP8.tl0PicIdx = -1;
   for (int i = 0; i < 3; ++i) {
     // Insert 2 video frames.
     for (int j = 0; j < 2; ++j) {
@@ -192,7 +192,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
         header.frameType = kVideoFrameKey;
       else
         header.frameType = kVideoFrameDelta;
-      header.type.Video.is_first_packet_in_frame = true;
+      header.video_header().is_first_packet_in_frame = true;
       header.header.markerBit = true;
      InsertAndVerifyDecodableFrame(payload, kFrameSize, &header);
      clock_.AdvanceTimeMilliseconds(33);
@@ -201,7 +201,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
 
     // Insert 2 padding only frames.
    header.frameType = kEmptyFrame;
-    header.type.Video.is_first_packet_in_frame = false;
+    header.video_header().is_first_packet_in_frame = false;
    header.header.markerBit = false;
    for (int j = 0; j < 2; ++j) {
      // InsertAndVerifyPaddingFrame(payload, &header);
@@ -136,22 +136,23 @@ bool LayerFilteringTransport::SendRtp(const uint8_t* packet,
     RtpDepacketizer::ParsedPayload parsed_payload;
     if (depacketizer->Parse(&parsed_payload, payload, payload_data_length)) {
       const int temporal_idx = static_cast<int>(
-          is_vp8 ? parsed_payload.type.Video.codecHeader.VP8.temporalIdx
-                 : parsed_payload.type.Video.codecHeader.VP9.temporal_idx);
+          is_vp8 ? parsed_payload.video_header().codecHeader.VP8.temporalIdx
+                 : parsed_payload.video_header().codecHeader.VP9.temporal_idx);
       const int spatial_idx = static_cast<int>(
           is_vp8 ? kNoSpatialIdx
-                 : parsed_payload.type.Video.codecHeader.VP9.spatial_idx);
+                 : parsed_payload.video_header().codecHeader.VP9.spatial_idx);
       const bool non_ref_for_inter_layer_pred =
           is_vp8 ? false
-                 : parsed_payload.type.Video.codecHeader.VP9
-                       .non_ref_for_inter_layer_pred;
+                 : parsed_payload.video_header()
+                       .codecHeader.VP9.non_ref_for_inter_layer_pred;
       // The number of spatial layers is sent in ssData, which is included only
       // in the first packet of the first spatial layer of a key frame.
-      if (!parsed_payload.type.Video.codecHeader.VP9.inter_pic_predicted &&
-          parsed_payload.type.Video.codecHeader.VP9.beginning_of_frame == 1 &&
+      if (!parsed_payload.video_header().codecHeader.VP9.inter_pic_predicted &&
+          parsed_payload.video_header().codecHeader.VP9.beginning_of_frame ==
+              1 &&
          spatial_idx == 0) {
        num_active_spatial_layers_ =
-            parsed_payload.type.Video.codecHeader.VP9.num_spatial_layers;
+            parsed_payload.video_header().codecHeader.VP9.num_spatial_layers;
      } else if (spatial_idx == kNoSpatialIdx)
        num_active_spatial_layers_ = 1;
      RTC_CHECK_GT(num_active_spatial_layers_, 0);
@@ -159,7 +160,7 @@ bool LayerFilteringTransport::SendRtp(const uint8_t* packet,
       if (selected_sl_ >= 0 &&
           spatial_idx ==
               std::min(num_active_spatial_layers_ - 1, selected_sl_) &&
-          parsed_payload.type.Video.codecHeader.VP9.end_of_frame) {
+          parsed_payload.video_header().codecHeader.VP9.end_of_frame) {
         // This layer is now the last in the superframe.
         set_marker_bit = true;
       } else {
@@ -98,19 +98,19 @@ class PictureIdObserver : public test::RtpRtcpObserver {
     switch (codec_type_) {
       case kVideoCodecVP8:
         parsed->picture_id =
-            parsed_payload.type.Video.codecHeader.VP8.pictureId;
+            parsed_payload.video_header().codecHeader.VP8.pictureId;
         parsed->tl0_pic_idx =
-            parsed_payload.type.Video.codecHeader.VP8.tl0PicIdx;
+            parsed_payload.video_header().codecHeader.VP8.tl0PicIdx;
         parsed->temporal_idx =
-            parsed_payload.type.Video.codecHeader.VP8.temporalIdx;
+            parsed_payload.video_header().codecHeader.VP8.temporalIdx;
         break;
       case kVideoCodecVP9:
         parsed->picture_id =
-            parsed_payload.type.Video.codecHeader.VP9.picture_id;
+            parsed_payload.video_header().codecHeader.VP9.picture_id;
         parsed->tl0_pic_idx =
-            parsed_payload.type.Video.codecHeader.VP9.tl0_pic_idx;
+            parsed_payload.video_header().codecHeader.VP9.tl0_pic_idx;
         parsed->temporal_idx =
-            parsed_payload.type.Video.codecHeader.VP9.temporal_idx;
+            parsed_payload.video_header().codecHeader.VP9.temporal_idx;
         break;
       default:
         RTC_NOTREACHED();
@ -136,7 +136,7 @@ class RtpVideoStreamReceiverTest : public testing::Test {
|
|||||||
WebRtcRTPHeader GetDefaultPacket() {
|
WebRtcRTPHeader GetDefaultPacket() {
|
||||||
WebRtcRTPHeader packet;
|
WebRtcRTPHeader packet;
|
||||||
memset(&packet, 0, sizeof(packet));
|
memset(&packet, 0, sizeof(packet));
|
||||||
packet.type.Video.codec = kVideoCodecH264;
|
packet.video_header().codec = kVideoCodecH264;
|
||||||
return packet;
|
return packet;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -151,8 +151,9 @@ class RtpVideoStreamReceiverTest : public testing::Test {
|
|||||||
info.pps_id = -1;
|
info.pps_id = -1;
|
||||||
data->push_back(H264::NaluType::kSps);
|
data->push_back(H264::NaluType::kSps);
|
||||||
data->push_back(sps_id);
|
data->push_back(sps_id);
|
||||||
packet->type.Video.codecHeader.H264
|
packet->video_header()
|
||||||
.nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
|
.codecHeader.H264
|
||||||
|
.nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
|
||||||
}
|
}
|
||||||
|
|
||||||
void AddPps(WebRtcRTPHeader* packet,
|
void AddPps(WebRtcRTPHeader* packet,
|
||||||
@ -165,8 +166,9 @@ class RtpVideoStreamReceiverTest : public testing::Test {
|
|||||||
info.pps_id = pps_id;
|
info.pps_id = pps_id;
|
||||||
data->push_back(H264::NaluType::kPps);
|
data->push_back(H264::NaluType::kPps);
|
||||||
data->push_back(pps_id);
|
data->push_back(pps_id);
|
||||||
packet->type.Video.codecHeader.H264
|
packet->video_header()
|
||||||
.nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
|
.codecHeader.H264
|
||||||
|
.nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
|
||||||
}
|
}
|
||||||
|
|
||||||
void AddIdr(WebRtcRTPHeader* packet, int pps_id) {
|
void AddIdr(WebRtcRTPHeader* packet, int pps_id) {
|
||||||
@ -174,8 +176,9 @@ class RtpVideoStreamReceiverTest : public testing::Test {
|
|||||||
info.type = H264::NaluType::kIdr;
|
info.type = H264::NaluType::kIdr;
|
||||||
info.sps_id = -1;
|
info.sps_id = -1;
|
||||||
info.pps_id = pps_id;
|
info.pps_id = pps_id;
|
||||||
packet->type.Video.codecHeader.H264
|
packet->video_header()
|
||||||
.nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
|
.codecHeader.H264
|
||||||
|
.nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
@@ -204,9 +207,9 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
   memset(&rtp_header, 0, sizeof(rtp_header));
   rtp_header.header.sequenceNumber = 1;
   rtp_header.header.markerBit = 1;
-  rtp_header.type.Video.is_first_packet_in_frame = true;
+  rtp_header.video_header().is_first_packet_in_frame = true;
   rtp_header.frameType = kVideoFrameKey;
-  rtp_header.type.Video.codec = kVideoCodecGeneric;
+  rtp_header.video_header().codec = kVideoCodecGeneric;
   mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                            data.size());
   EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
@@ -239,9 +242,9 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
   memset(&rtp_header, 0, sizeof(rtp_header));
   rtp_header.header.sequenceNumber = 1;
   rtp_header.header.markerBit = 1;
-  rtp_header.type.Video.is_first_packet_in_frame = true;
+  rtp_header.video_header().is_first_packet_in_frame = true;
   rtp_header.frameType = kVideoFrameKey;
-  rtp_header.type.Video.codec = kVideoCodecGeneric;
+  rtp_header.video_header().codec = kVideoCodecGeneric;
   constexpr uint8_t expected_bitsteam[] = {1, 2, 3, 0xff};
   mock_on_complete_frame_callback_.AppendExpectedBitstream(
       expected_bitsteam, sizeof(expected_bitsteam));
@@ -268,7 +271,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
   WebRtcRTPHeader sps_packet = GetDefaultPacket();
   AddSps(&sps_packet, 0, &sps_data);
   sps_packet.header.sequenceNumber = 0;
-  sps_packet.type.Video.is_first_packet_in_frame = true;
+  sps_packet.video_header().is_first_packet_in_frame = true;
   mock_on_complete_frame_callback_.AppendExpectedBitstream(
       kH264StartCode, sizeof(kH264StartCode));
   mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
@@ -280,7 +283,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
   WebRtcRTPHeader pps_packet = GetDefaultPacket();
   AddPps(&pps_packet, 0, 1, &pps_data);
   pps_packet.header.sequenceNumber = 1;
-  pps_packet.type.Video.is_first_packet_in_frame = true;
+  pps_packet.video_header().is_first_packet_in_frame = true;
   mock_on_complete_frame_callback_.AppendExpectedBitstream(
       kH264StartCode, sizeof(kH264StartCode));
   mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
@@ -291,7 +294,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
   std::vector<uint8_t> idr_data;
   WebRtcRTPHeader idr_packet = GetDefaultPacket();
   AddIdr(&idr_packet, 1);
-  idr_packet.type.Video.is_first_packet_in_frame = true;
+  idr_packet.video_header().is_first_packet_in_frame = true;
   idr_packet.header.sequenceNumber = 2;
   idr_packet.header.markerBit = 1;
   idr_packet.frameType = kVideoFrameKey;
@@ -331,12 +334,12 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
   WebRtcRTPHeader idr_packet = GetDefaultPacket();
   AddIdr(&idr_packet, 0);
   idr_packet.header.payloadType = kPayloadType;
-  idr_packet.type.Video.is_first_packet_in_frame = true;
+  idr_packet.video_header().is_first_packet_in_frame = true;
   idr_packet.header.sequenceNumber = 2;
   idr_packet.header.markerBit = 1;
-  idr_packet.type.Video.is_first_packet_in_frame = true;
+  idr_packet.video_header().is_first_packet_in_frame = true;
   idr_packet.frameType = kVideoFrameKey;
-  idr_packet.type.Video.codec = kVideoCodecH264;
+  idr_packet.video_header().codec = kVideoCodecH264;
   data.insert(data.end(), {1, 2, 3});
   mock_on_complete_frame_callback_.AppendExpectedBitstream(
       kH264StartCode, sizeof(kH264StartCode));
@@ -352,11 +355,11 @@ TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
   std::vector<uint8_t> data;
   data.insert(data.end(), {1, 2, 3});
   header.header.payloadType = 99;
-  header.type.Video.is_first_packet_in_frame = true;
+  header.video_header().is_first_packet_in_frame = true;
   header.header.sequenceNumber = 2;
   header.header.markerBit = true;
   header.frameType = kVideoFrameKey;
-  header.type.Video.codec = kVideoCodecGeneric;
+  header.video_header().codec = kVideoCodecGeneric;
   mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
                                                            data.size());
 
@@ -388,9 +391,9 @@ TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
   memset(&rtp_header, 0, sizeof(rtp_header));
   rtp_header.header.sequenceNumber = 1;
   rtp_header.header.markerBit = 1;
-  rtp_header.type.Video.is_first_packet_in_frame = true;
+  rtp_header.video_header().is_first_packet_in_frame = true;
   rtp_header.frameType = kVideoFrameDelta;
-  rtp_header.type.Video.codec = kVideoCodecGeneric;
+  rtp_header.video_header().codec = kVideoCodecGeneric;
 
   EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
   rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
@@ -561,11 +561,11 @@ class VideoAnalyzer : public PacketReceiver,
         depacketizer->Parse(&parsed_payload, payload, payload_data_length);
     RTC_DCHECK(result);
     const int temporal_idx = static_cast<int>(
-        is_vp8 ? parsed_payload.type.Video.codecHeader.VP8.temporalIdx
-               : parsed_payload.type.Video.codecHeader.VP9.temporal_idx);
+        is_vp8 ? parsed_payload.video_header().codecHeader.VP8.temporalIdx
+               : parsed_payload.video_header().codecHeader.VP9.temporal_idx);
     const int spatial_idx = static_cast<int>(
         is_vp8 ? kNoSpatialIdx
-               : parsed_payload.type.Video.codecHeader.VP9.spatial_idx);
+               : parsed_payload.video_header().codecHeader.VP9.spatial_idx);
     return (selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
             temporal_idx <= selected_tl_) &&
            (selected_sl_ < 0 || spatial_idx == kNoSpatialIdx ||
@@ -3170,19 +3170,19 @@ class Vp9HeaderObserver : public test::SendTest {
       RtpDepacketizer::ParsedPayload parsed;
       RtpDepacketizerVp9 depacketizer;
       EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length));
-      EXPECT_EQ(VideoCodecType::kVideoCodecVP9, parsed.type.Video.codec);
+      EXPECT_EQ(VideoCodecType::kVideoCodecVP9, parsed.video_header().codec);
       // Verify common fields for all configurations.
-      VerifyCommonHeader(parsed.type.Video.codecHeader.VP9);
-      CompareConsecutiveFrames(header, parsed.type.Video);
+      VerifyCommonHeader(parsed.video_header().codecHeader.VP9);
+      CompareConsecutiveFrames(header, parsed.video_header());
       // Verify configuration specific settings.
-      InspectHeader(parsed.type.Video.codecHeader.VP9);
+      InspectHeader(parsed.video_header().codecHeader.VP9);
 
       ++packets_sent_;
       if (header.markerBit) {
         ++frames_sent_;
       }
       last_header_ = header;
-      last_vp9_ = parsed.type.Video.codecHeader.VP9;
+      last_vp9_ = parsed.video_header().codecHeader.VP9;
     }
     return SEND_PACKET;
   }
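Call sites across the tests now reach codec-specific data only through the accessor. A minimal usage sketch, assuming a VP9 payload buffer (payload, payload_length) prepared the same way as in the observer above:

// Sketch only: reading VP9 layer indices via ParsedPayload::video_header()
// rather than through type.Video / RTPTypeHeader directly.
RtpDepacketizerVp9 depacketizer;
RtpDepacketizer::ParsedPayload parsed;
if (depacketizer.Parse(&parsed, payload, payload_length)) {
  const RTPVideoHeaderVP9& vp9 = parsed.video_header().codecHeader.VP9;
  if (vp9.temporal_idx != kNoTemporalIdx) {
    // Temporal scalability in use; vp9.temporal_idx identifies the layer.
  }
}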