Add accessors for the codec-specific types in RTPVideoHeader's RTPVideoTypeHeader union.

This CL is in preparation for changing RTPVideoTypeHeader into an absl::variant.
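
The mechanical change at every call site is the same: a direct read or write of a union member becomes an accessor call, so when the representation later switches to absl::variant, only the accessor bodies have to change. Schematically, this is the pattern repeated throughout the diffs below:

  // Before: callers reach into the union member directly.
  header.codecHeader.VP8.temporalIdx = 1;
  // After: callers go through the new accessor.
  header.vp8().temporalIdx = 1;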

Bug: none
Change-Id: I1672d866df0395f3417d8e278cc67f017ab0ff98
Reviewed-on: https://webrtc-review.googlesource.com/87261
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23856}
philipel 2018-07-05 12:27:04 +02:00 committed by Commit Bot
parent 011dc6427a
commit 5ab67a5d71
30 changed files with 464 additions and 485 deletions


@ -38,6 +38,7 @@ namespace webrtc {
// TODO(nisse): Deprecated, use webrtc::VideoCodecType instead.
using RtpVideoCodecTypes = VideoCodecType;
// TODO(philipel): Change from union to absl::variant.
union RTPVideoTypeHeader {
RTPVideoHeaderVP8 VP8;
RTPVideoHeaderVP9 VP9;
@ -47,6 +48,13 @@ union RTPVideoTypeHeader {
// Since RTPVideoHeader is used as a member of a union, it can't have a
// non-trivial default constructor.
struct RTPVideoHeader {
RTPVideoHeaderVP8& vp8() { return codecHeader.VP8; }
const RTPVideoHeaderVP8& vp8() const { return codecHeader.VP8; }
RTPVideoHeaderVP9& vp9() { return codecHeader.VP9; }
const RTPVideoHeaderVP9& vp9() const { return codecHeader.VP9; }
RTPVideoHeaderH264& h264() { return codecHeader.H264; }
const RTPVideoHeaderH264& h264() const { return codecHeader.H264; }
uint16_t width; // size
uint16_t height;
VideoRotation rotation;
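
For context, and not part of this CL: once RTPVideoTypeHeader becomes an absl::variant, these accessors could keep their signatures and wrap absl::get, which fails loudly (absl::bad_variant_access) when the wrong alternative is accessed. A rough sketch under that assumption:

  // Sketch only: assumes the planned absl::variant migration has landed.
  #include "absl/types/variant.h"

  using RTPVideoTypeHeader =
      absl::variant<RTPVideoHeaderVP8, RTPVideoHeaderVP9, RTPVideoHeaderH264>;

  struct RTPVideoHeader {
    RTPVideoHeaderVP8& vp8() {
      return absl::get<RTPVideoHeaderVP8>(codec_header);
    }
    const RTPVideoHeaderVP8& vp8() const {
      return absl::get<RTPVideoHeaderVP8>(codec_header);
    }
    // vp9() and h264() would follow the same pattern; the remaining
    // members (width, height, rotation, ...) stay unchanged.
    RTPVideoTypeHeader codec_header;
  };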


@ -21,20 +21,20 @@ namespace webrtc {
RtpPacketizer* RtpPacketizer::Create(VideoCodecType type,
size_t max_payload_len,
size_t last_packet_reduction_len,
const RTPVideoTypeHeader* rtp_type_header,
const RTPVideoHeader* rtp_video_header,
FrameType frame_type) {
switch (type) {
case kVideoCodecH264:
RTC_CHECK(rtp_type_header);
RTC_CHECK(rtp_video_header);
return new RtpPacketizerH264(max_payload_len, last_packet_reduction_len,
rtp_type_header->H264.packetization_mode);
rtp_video_header->h264().packetization_mode);
case kVideoCodecVP8:
RTC_CHECK(rtp_type_header);
return new RtpPacketizerVp8(rtp_type_header->VP8, max_payload_len,
RTC_CHECK(rtp_video_header);
return new RtpPacketizerVp8(rtp_video_header->vp8(), max_payload_len,
last_packet_reduction_len);
case kVideoCodecVP9:
RTC_CHECK(rtp_type_header);
return new RtpPacketizerVp9(rtp_type_header->VP9, max_payload_len,
RTC_CHECK(rtp_video_header);
return new RtpPacketizerVp9(rtp_video_header->vp9(), max_payload_len,
last_packet_reduction_len);
case kVideoCodecGeneric:
return new RtpPacketizerGeneric(frame_type, max_payload_len,


@ -26,7 +26,7 @@ class RtpPacketizer {
static RtpPacketizer* Create(VideoCodecType type,
size_t max_payload_len,
size_t last_packet_reduction_len,
const RTPVideoTypeHeader* rtp_type_header,
const RTPVideoHeader* rtp_video_header,
FrameType frame_type);
virtual ~RtpPacketizer() {}


@ -429,7 +429,7 @@ bool RtpDepacketizerH264::Parse(ParsedPayload* parsed_payload,
modified_buffer_.reset();
uint8_t nal_type = payload_data[0] & kTypeMask;
parsed_payload->video_header().codecHeader.H264.nalus_length = 0;
parsed_payload->video_header().h264().nalus_length = 0;
if (nal_type == H264::NaluType::kFuA) {
// Fragmented NAL units (FU-A).
if (!ParseFuaNalu(parsed_payload, payload_data))
@ -458,8 +458,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
parsed_payload->video_header().codec = kVideoCodecH264;
parsed_payload->video_header().simulcastIdx = 0;
parsed_payload->video_header().is_first_packet_in_frame = true;
RTPVideoHeaderH264* h264_header =
&parsed_payload->video_header().codecHeader.H264;
RTPVideoHeaderH264* h264_header = &parsed_payload->video_header().h264();
const uint8_t* nalu_start = payload_data + kNalHeaderSize;
const size_t nalu_length = length_ - kNalHeaderSize;
@ -618,7 +617,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
RTC_LOG(LS_WARNING) << "Unexpected STAP-A or FU-A received.";
return false;
}
RTPVideoHeaderH264* h264 = &parsed_payload->video_header().codecHeader.H264;
RTPVideoHeaderH264* h264 = &parsed_payload->video_header().h264();
if (h264->nalus_length == kMaxNalusPerPacket) {
RTC_LOG(LS_WARNING)
<< "Received packet containing more than " << kMaxNalusPerPacket
@ -677,7 +676,7 @@ bool RtpDepacketizerH264::ParseFuaNalu(
parsed_payload->video_header().codec = kVideoCodecH264;
parsed_payload->video_header().simulcastIdx = 0;
parsed_payload->video_header().is_first_packet_in_frame = first_fragment;
RTPVideoHeaderH264* h264 = &parsed_payload->video_header().codecHeader.H264;
RTPVideoHeaderH264* h264 = &parsed_payload->video_header().h264();
h264->packetization_type = kH264FuA;
h264->nalu_type = original_nal_type;
if (first_fragment) {


@ -65,11 +65,10 @@ void CreateThreeFragments(RTPFragmentationHeader* fragmentation,
RtpPacketizer* CreateH264Packetizer(H264PacketizationMode mode,
size_t max_payload_size,
size_t last_packet_reduction) {
RTPVideoTypeHeader type_header;
type_header.H264.packetization_mode = mode;
RTPVideoHeader header;
header.h264().packetization_mode = mode;
return RtpPacketizer::Create(kVideoCodecH264, max_payload_size,
last_packet_reduction, &type_header,
kEmptyFrame);
last_packet_reduction, &header, kEmptyFrame);
}
void VerifyFua(size_t fua_index,
@ -602,9 +601,8 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264SingleNalu,
payload.video_header().codecHeader.H264.packetization_type);
EXPECT_EQ(kIdr, payload.video_header().codecHeader.H264.nalu_type);
EXPECT_EQ(kH264SingleNalu, payload.video_header().h264().packetization_type);
EXPECT_EQ(kIdr, payload.video_header().h264().nalu_type);
}
TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
@ -618,8 +616,7 @@ TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264SingleNalu,
payload.video_header().codecHeader.H264.packetization_type);
EXPECT_EQ(kH264SingleNalu, payload.video_header().h264().packetization_type);
EXPECT_EQ(1280u, payload.video_header().width);
EXPECT_EQ(720u, payload.video_header().height);
}
@ -648,7 +645,7 @@ TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
EXPECT_EQ(kH264StapA, h264.packetization_type);
// NALU type for aggregated packets is the type of the first packet only.
EXPECT_EQ(kSps, h264.nalu_type);
@ -679,8 +676,7 @@ TEST_F(RtpDepacketizerH264Test, TestStapANaluSpsWithResolution) {
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264StapA,
payload.video_header().codecHeader.H264.packetization_type);
EXPECT_EQ(kH264StapA, payload.video_header().h264().packetization_type);
EXPECT_EQ(1280u, payload.video_header().width);
EXPECT_EQ(720u, payload.video_header().height);
}
@ -807,10 +803,9 @@ TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
EXPECT_EQ(kH264StapA,
payload.video_header().codecHeader.H264.packetization_type);
EXPECT_EQ(kH264StapA, payload.video_header().h264().packetization_type);
// NALU type for aggregated packets is the type of the first packet only.
EXPECT_EQ(kSlice, payload.video_header().codecHeader.H264.nalu_type);
EXPECT_EQ(kSlice, payload.video_header().h264().nalu_type);
}
TEST_F(RtpDepacketizerH264Test, TestFuA) {
@ -847,7 +842,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
EXPECT_EQ(kH264FuA, h264.packetization_type);
EXPECT_EQ(kIdr, h264.nalu_type);
ASSERT_EQ(1u, h264.nalus_length);
@ -864,7 +859,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
{
const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
EXPECT_EQ(kH264FuA, h264.packetization_type);
EXPECT_EQ(kIdr, h264.nalu_type);
// NALU info is only expected for the first FU-A packet.
@ -878,7 +873,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
{
const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
EXPECT_EQ(kH264FuA, h264.packetization_type);
EXPECT_EQ(kIdr, h264.nalu_type);
// NALU info is only expected for the first FU-A packet.
@ -930,7 +925,7 @@ TEST_F(RtpDepacketizerH264Test, TestSeiPacket) {
};
RtpDepacketizer::ParsedPayload payload;
ASSERT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
const RTPVideoHeaderH264& h264 = payload.video_header().codecHeader.H264;
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
EXPECT_EQ(kH264SingleNalu, h264.packetization_type);
EXPECT_EQ(kSei, h264.nalu_type);


@ -486,16 +486,16 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
beginning_of_partition && (partition_id == 0);
parsed_payload->video_header().simulcastIdx = 0;
parsed_payload->video_header().codec = kVideoCodecVP8;
parsed_payload->video_header().codecHeader.VP8.nonReference =
parsed_payload->video_header().vp8().nonReference =
(*payload_data & 0x20) ? true : false; // N bit
parsed_payload->video_header().codecHeader.VP8.partitionId = partition_id;
parsed_payload->video_header().codecHeader.VP8.beginningOfPartition =
parsed_payload->video_header().vp8().partitionId = partition_id;
parsed_payload->video_header().vp8().beginningOfPartition =
beginning_of_partition;
parsed_payload->video_header().codecHeader.VP8.pictureId = kNoPictureId;
parsed_payload->video_header().codecHeader.VP8.tl0PicIdx = kNoTl0PicIdx;
parsed_payload->video_header().codecHeader.VP8.temporalIdx = kNoTemporalIdx;
parsed_payload->video_header().codecHeader.VP8.layerSync = false;
parsed_payload->video_header().codecHeader.VP8.keyIdx = kNoKeyIdx;
parsed_payload->video_header().vp8().pictureId = kNoPictureId;
parsed_payload->video_header().vp8().tl0PicIdx = kNoTl0PicIdx;
parsed_payload->video_header().vp8().temporalIdx = kNoTemporalIdx;
parsed_payload->video_header().vp8().layerSync = false;
parsed_payload->video_header().vp8().keyIdx = kNoKeyIdx;
if (partition_id > 8) {
// Weak check for corrupt payload_data: PartID MUST NOT be larger than 8.
@ -512,8 +512,8 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
if (extension) {
const int parsed_bytes =
ParseVP8Extension(&parsed_payload->video_header().codecHeader.VP8,
payload_data, payload_data_length);
ParseVP8Extension(&parsed_payload->video_header().vp8(), payload_data,
payload_data_length);
if (parsed_bytes < 0)
return false;
payload_data += parsed_bytes;


@ -61,9 +61,9 @@ constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
// +-+-+-+-+-+-+-+-+
void VerifyBasicHeader(RTPVideoHeader* header, bool N, bool S, int part_id) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(N, header->codecHeader.VP8.nonReference);
EXPECT_EQ(S, header->codecHeader.VP8.beginningOfPartition);
EXPECT_EQ(part_id, header->codecHeader.VP8.partitionId);
EXPECT_EQ(N, header->vp8().nonReference);
EXPECT_EQ(S, header->vp8().beginningOfPartition);
EXPECT_EQ(part_id, header->vp8().partitionId);
}
void VerifyExtensions(RTPVideoHeader* header,
@ -72,10 +72,10 @@ void VerifyExtensions(RTPVideoHeader* header,
uint8_t temporal_idx, /* T */
int key_idx /* K */) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(picture_id, header->codecHeader.VP8.pictureId);
EXPECT_EQ(tl0_pic_idx, header->codecHeader.VP8.tl0PicIdx);
EXPECT_EQ(temporal_idx, header->codecHeader.VP8.temporalIdx);
EXPECT_EQ(key_idx, header->codecHeader.VP8.keyIdx);
EXPECT_EQ(picture_id, header->vp8().pictureId);
EXPECT_EQ(tl0_pic_idx, header->vp8().tl0PicIdx);
EXPECT_EQ(temporal_idx, header->vp8().temporalIdx);
EXPECT_EQ(key_idx, header->vp8().keyIdx);
}
} // namespace
@ -372,7 +372,7 @@ TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx, 2,
kNoKeyIdx);
EXPECT_FALSE(payload.video_header().codecHeader.VP8.layerSync);
EXPECT_FALSE(payload.video_header().vp8().layerSync);
}
TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
@ -453,8 +453,7 @@ TEST_F(RtpDepacketizerVp8Test, TestWithPacketizer) {
VerifyExtensions(&payload.video_header(), input_header.pictureId,
input_header.tl0PicIdx, input_header.temporalIdx,
input_header.keyIdx);
EXPECT_EQ(payload.video_header().codecHeader.VP8.layerSync,
input_header.layerSync);
EXPECT_EQ(payload.video_header().vp8().layerSync, input_header.layerSync);
}
TEST_F(RtpDepacketizerVp8Test, TestEmptyPayload) {


@ -719,7 +719,7 @@ bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey;
RTPVideoHeaderVP9* vp9 = &parsed_payload->video_header().codecHeader.VP9;
RTPVideoHeaderVP9* vp9 = &parsed_payload->video_header().vp9();
vp9->InitRTPVideoHeaderVP9();
vp9->inter_pic_predicted = p_bit ? true : false;
vp9->flexible_mode = f_bit ? true : false;


@ -83,7 +83,7 @@ void ParseAndCheckPacket(const uint8_t* packet,
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer->Parse(&parsed, packet, expected_length));
EXPECT_EQ(kVideoCodecVP9, parsed.video_header().codec);
VerifyHeader(expected, parsed.video_header().codecHeader.VP9);
VerifyHeader(expected, parsed.video_header().vp9());
const size_t kExpectedPayloadLength = expected_length - expected_hdr_length;
VerifyPayload(parsed, packet + expected_hdr_length, kExpectedPayloadLength);
}


@ -240,7 +240,7 @@ class RtpRtcpImplTest : public ::testing::Test {
rtp_video_header.is_first_packet_in_frame = true;
rtp_video_header.simulcastIdx = 0;
rtp_video_header.codec = kVideoCodecVP8;
rtp_video_header.codecHeader = {vp8_header};
rtp_video_header.vp8() = vp8_header;
rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
const uint8_t payload[100] = {0};


@ -1811,8 +1811,7 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesGeneric) {
TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
RTPVideoHeader header;
header.codec = kVideoCodecH264;
header.codecHeader.H264.packetization_mode =
H264PacketizationMode::NonInterleaved;
header.h264().packetization_mode = H264PacketizationMode::NonInterleaved;
EXPECT_EQ(kDontRetransmit,
rtp_sender_video_->GetStorageType(
@ -1835,7 +1834,7 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) {
RTPVideoHeader header;
header.codec = kVideoCodecVP8;
header.codecHeader.VP8.temporalIdx = 0;
header.vp8().temporalIdx = 0;
EXPECT_EQ(kDontRetransmit,
rtp_sender_video_->GetStorageType(
@ -1868,7 +1867,7 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8HigherLayers) {
header.codec = kVideoCodecVP8;
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
header.codecHeader.VP8.temporalIdx = tid;
header.vp8().temporalIdx = tid;
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitOff,
@ -1894,7 +1893,7 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP9) {
header.codec = kVideoCodecVP9;
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
header.codecHeader.VP9.temporal_idx = tid;
header.vp9().temporal_idx = tid;
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitOff,
@ -1931,7 +1930,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
kFrameIntervalMs;
constexpr int kPattern[] = {0, 2, 1, 2};
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
header.codecHeader.VP8.temporalIdx = kPattern[i % arraysize(kPattern)];
header.vp8().temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
}
@ -1940,7 +1939,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
// right now. We will wait at most one expected retransmission time before
// acknowledging that it did not arrive, which means this frame and the next
// will not be retransmitted.
header.codecHeader.VP8.temporalIdx = 1;
header.vp8().temporalIdx = 1;
EXPECT_EQ(StorageType::kDontRetransmit,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@ -1956,7 +1955,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
// Insert a frame for TL2. We just had a frame in TL1, so the next one there
// is three frames away. TL0 is still too far in the past. So, allow
// retransmission.
header.codecHeader.VP8.temporalIdx = 2;
header.vp8().temporalIdx = 2;
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@ -1988,7 +1987,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
kFrameIntervalMs;
constexpr int kPattern[] = {0, 2, 2, 2};
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
header.codecHeader.VP8.temporalIdx = kPattern[i % arraysize(kPattern)];
header.vp8().temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@ -1999,7 +1998,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
// we don't store for retransmission because we expect a frame in a lower
// layer, but that last frame in TL1 was a long time ago in absolute terms,
// so allow retransmission anyway.
header.codecHeader.VP8.temporalIdx = 1;
header.vp8().temporalIdx = 1;
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
}


@ -352,7 +352,7 @@ bool RTPSenderVideo::SendVideo(enum VideoCodecType video_type,
std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
video_type, max_data_payload_length, last_packet_reduction_len,
video_header ? &(video_header->codecHeader) : nullptr, frame_type));
video_header, frame_type));
const uint8_t temporal_id =
video_header ? GetTemporalId(*video_header) : kNoTemporalIdx;
@ -472,9 +472,9 @@ StorageType RTPSenderVideo::GetStorageType(
uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
switch (header.codec) {
case kVideoCodecVP8:
return header.codecHeader.VP8.temporalIdx;
return header.vp8().temporalIdx;
case kVideoCodecVP9:
return header.codecHeader.VP9.temporal_idx;
return header.vp9().temporal_idx;
default:
return kNoTemporalIdx;
}
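
Also for context, an assumption about a possible follow-up rather than anything in this CL: with an absl::variant, a switch on header.codec like the one above could become an absl::visit dispatch. A minimal sketch (codec_header is the hypothetical variant member from the sketch earlier):

  // Sketch only: visitor returning the temporal index per codec type.
  struct TemporalIdGetter {
    uint8_t operator()(const RTPVideoHeaderVP8& vp8) const {
      return vp8.temporalIdx;
    }
    uint8_t operator()(const RTPVideoHeaderVP9& vp9) const {
      return vp9.temporal_idx;
    }
    uint8_t operator()(const RTPVideoHeaderH264&) const {
      return kNoTemporalIdx;
    }
  };
  // uint8_t tid = absl::visit(TemporalIdGetter(), header.codec_header);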


@ -37,7 +37,7 @@ TEST(TestDecodingState, FrameContinuity) {
packet.seqNum = 0xffff;
packet.frameType = kVideoFrameDelta;
packet.video_header.codec = kVideoCodecVP8;
packet.video_header.codecHeader.VP8.pictureId = 0x007F;
packet.video_header.vp8().pictureId = 0x007F;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -53,17 +53,17 @@ TEST(TestDecodingState, FrameContinuity) {
packet.frameType = kVideoFrameDelta;
// Use pictureId
packet.is_first_packet_in_frame = false;
packet.video_header.codecHeader.VP8.pictureId = 0x0002;
packet.video_header.vp8().pictureId = 0x0002;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
frame.Reset();
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.video_header.vp8().pictureId = 0;
packet.seqNum = 10;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Use sequence numbers.
packet.video_header.codecHeader.VP8.pictureId = kNoPictureId;
packet.video_header.vp8().pictureId = kNoPictureId;
frame.Reset();
packet.seqNum = dec_state.sequence_num() - 1u;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -82,9 +82,9 @@ TEST(TestDecodingState, FrameContinuity) {
// Insert packet with temporal info.
dec_state.Reset();
frame.Reset();
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
packet.seqNum = 1;
packet.timestamp = 1;
EXPECT_TRUE(dec_state.full_sync());
@ -93,9 +93,9 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// 1 layer up - still good.
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 1;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 1;
packet.seqNum = 2;
packet.timestamp = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -104,18 +104,18 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// Lost non-base layer packet => should update sync parameter.
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 3;
packet.video_header.codecHeader.VP8.pictureId = 3;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 3;
packet.video_header.vp8().pictureId = 3;
packet.seqNum = 4;
packet.timestamp = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
// Now insert the next non-base layer (belonging to the next tl0PicIdx).
frame.Reset();
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.codecHeader.VP8.temporalIdx = 2;
packet.video_header.codecHeader.VP8.pictureId = 4;
packet.video_header.vp8().tl0PicIdx = 1;
packet.video_header.vp8().temporalIdx = 2;
packet.video_header.vp8().pictureId = 4;
packet.seqNum = 5;
packet.timestamp = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -125,9 +125,9 @@ TEST(TestDecodingState, FrameContinuity) {
EXPECT_TRUE(dec_state.full_sync());
// Next base layer (dropped interim non-base layers) - should update sync.
frame.Reset();
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 5;
packet.video_header.vp8().tl0PicIdx = 1;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 5;
packet.seqNum = 6;
packet.timestamp = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -137,18 +137,18 @@ TEST(TestDecodingState, FrameContinuity) {
// Check wrap for temporal layers.
frame.Reset();
packet.video_header.codecHeader.VP8.tl0PicIdx = 0x00FF;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 6;
packet.video_header.vp8().tl0PicIdx = 0x00FF;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 6;
packet.seqNum = 7;
packet.timestamp = 7;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
frame.Reset();
packet.video_header.codecHeader.VP8.tl0PicIdx = 0x0000;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 7;
packet.video_header.vp8().tl0PicIdx = 0x0000;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 7;
packet.seqNum = 8;
packet.timestamp = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@ -214,9 +214,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -226,9 +226,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 1;
packet.seqNum = 1;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 1;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -238,9 +238,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 3;
packet.seqNum = 3;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 3;
packet.video_header.codecHeader.VP8.pictureId = 3;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 3;
packet.video_header.vp8().pictureId = 3;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -249,9 +249,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
frame.Reset();
packet.timestamp = 4;
packet.seqNum = 4;
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 4;
packet.video_header.vp8().tl0PicIdx = 1;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -263,9 +263,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.is_first_packet_in_frame = 1;
packet.timestamp = 5;
packet.seqNum = 5;
packet.video_header.codecHeader.VP8.tl0PicIdx = 2;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 5;
packet.video_header.vp8().tl0PicIdx = 2;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -276,9 +276,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.frameType = kVideoFrameDelta;
packet.timestamp = 6;
packet.seqNum = 6;
packet.video_header.codecHeader.VP8.tl0PicIdx = 3;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 6;
packet.video_header.vp8().tl0PicIdx = 3;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -287,9 +287,9 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.is_first_packet_in_frame = 1;
packet.timestamp = 8;
packet.seqNum = 8;
packet.video_header.codecHeader.VP8.tl0PicIdx = 4;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 8;
packet.video_header.vp8().tl0PicIdx = 4;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -302,10 +302,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.is_first_packet_in_frame = 1;
packet.timestamp = 9;
packet.seqNum = 9;
packet.video_header.codecHeader.VP8.tl0PicIdx = 4;
packet.video_header.codecHeader.VP8.temporalIdx = 2;
packet.video_header.codecHeader.VP8.pictureId = 9;
packet.video_header.codecHeader.VP8.layerSync = true;
packet.video_header.vp8().tl0PicIdx = 4;
packet.video_header.vp8().temporalIdx = 2;
packet.video_header.vp8().pictureId = 9;
packet.video_header.vp8().layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -323,10 +323,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 1;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.video_header.codecHeader.VP8.layerSync = false;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
packet.video_header.vp8().layerSync = false;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@ -337,10 +337,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 0;
packet.timestamp = 1;
packet.seqNum = 1;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 2;
packet.video_header.codecHeader.VP8.pictureId = 1;
packet.video_header.codecHeader.VP8.layerSync = true;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 2;
packet.video_header.vp8().pictureId = 1;
packet.video_header.vp8().layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Layer 1
@ -350,10 +350,10 @@ TEST(TestDecodingState, MultiLayerBehavior) {
packet.markerBit = 1;
packet.timestamp = 2;
packet.seqNum = 3;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 2;
packet.video_header.codecHeader.VP8.layerSync = true;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 2;
packet.video_header.vp8().layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@ -368,9 +368,9 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -384,8 +384,8 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
packet.frameType = kVideoFrameDelta;
packet.timestamp += 3000;
++packet.seqNum;
packet.video_header.codecHeader.VP8.temporalIdx = 1;
packet.video_header.codecHeader.VP8.pictureId = 2;
packet.video_header.vp8().temporalIdx = 1;
packet.video_header.vp8().pictureId = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@ -421,9 +421,9 @@ TEST(TestDecodingState, PictureIdRepeat) {
packet.video_header.codec = kVideoCodecVP8;
packet.timestamp = 0;
packet.seqNum = 0;
packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
packet.video_header.codecHeader.VP8.temporalIdx = 0;
packet.video_header.codecHeader.VP8.pictureId = 0;
packet.video_header.vp8().tl0PicIdx = 0;
packet.video_header.vp8().temporalIdx = 0;
packet.video_header.vp8().pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@ -433,15 +433,15 @@ TEST(TestDecodingState, PictureIdRepeat) {
frame.Reset();
++packet.timestamp;
++packet.seqNum;
packet.video_header.codecHeader.VP8.temporalIdx++;
packet.video_header.codecHeader.VP8.pictureId++;
packet.video_header.vp8().temporalIdx++;
packet.video_header.vp8().pictureId++;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
frame.Reset();
// Test a gap in tl0PicIdx only, while everything else is continuous.
packet.video_header.codecHeader.VP8.tl0PicIdx += 3;
packet.video_header.codecHeader.VP8.temporalIdx++;
packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
packet.video_header.vp8().tl0PicIdx += 3;
packet.video_header.vp8().temporalIdx++;
packet.video_header.vp8().tl0PicIdx = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
}
@ -458,7 +458,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
packet.dataPtr = data;
packet.video_header.codec = kVideoCodecVP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.vp9();
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
@ -501,7 +501,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
packet.dataPtr = data;
packet.video_header.codec = kVideoCodecVP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.vp9();
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
@ -556,7 +556,7 @@ TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
packet.dataPtr = data;
packet.video_header.codec = kVideoCodecVP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
RTPVideoHeaderVP9& vp9_hdr = packet.video_header.vp9();
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;


@ -64,16 +64,15 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
_codecSpecificInfo.codecType = kVideoCodecVP8;
}
_codecSpecificInfo.codecSpecific.VP8.nonReference =
header->codecHeader.VP8.nonReference;
if (header->codecHeader.VP8.temporalIdx != kNoTemporalIdx) {
header->vp8().nonReference;
if (header->vp8().temporalIdx != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.VP8.temporalIdx =
header->codecHeader.VP8.temporalIdx;
header->vp8().temporalIdx;
_codecSpecificInfo.codecSpecific.VP8.layerSync =
header->codecHeader.VP8.layerSync;
header->vp8().layerSync;
}
if (header->codecHeader.VP8.keyIdx != kNoKeyIdx) {
_codecSpecificInfo.codecSpecific.VP8.keyIdx =
header->codecHeader.VP8.keyIdx;
if (header->vp8().keyIdx != kNoKeyIdx) {
_codecSpecificInfo.codecSpecific.VP8.keyIdx = header->vp8().keyIdx;
}
break;
}
@ -87,50 +86,48 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
_codecSpecificInfo.codecType = kVideoCodecVP9;
}
_codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
header->codecHeader.VP9.inter_pic_predicted;
header->vp9().inter_pic_predicted;
_codecSpecificInfo.codecSpecific.VP9.flexible_mode =
header->codecHeader.VP9.flexible_mode;
header->vp9().flexible_mode;
_codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
header->codecHeader.VP9.num_ref_pics;
for (uint8_t r = 0; r < header->codecHeader.VP9.num_ref_pics; ++r) {
header->vp9().num_ref_pics;
for (uint8_t r = 0; r < header->vp9().num_ref_pics; ++r) {
_codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
header->codecHeader.VP9.pid_diff[r];
header->vp9().pid_diff[r];
}
_codecSpecificInfo.codecSpecific.VP9.ss_data_available =
header->codecHeader.VP9.ss_data_available;
if (header->codecHeader.VP9.temporal_idx != kNoTemporalIdx) {
header->vp9().ss_data_available;
if (header->vp9().temporal_idx != kNoTemporalIdx) {
_codecSpecificInfo.codecSpecific.VP9.temporal_idx =
header->codecHeader.VP9.temporal_idx;
header->vp9().temporal_idx;
_codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
header->codecHeader.VP9.temporal_up_switch;
header->vp9().temporal_up_switch;
}
if (header->codecHeader.VP9.spatial_idx != kNoSpatialIdx) {
if (header->vp9().spatial_idx != kNoSpatialIdx) {
_codecSpecificInfo.codecSpecific.VP9.spatial_idx =
header->codecHeader.VP9.spatial_idx;
header->vp9().spatial_idx;
_codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
header->codecHeader.VP9.inter_layer_predicted;
header->vp9().inter_layer_predicted;
}
if (header->codecHeader.VP9.gof_idx != kNoGofIdx) {
_codecSpecificInfo.codecSpecific.VP9.gof_idx =
header->codecHeader.VP9.gof_idx;
if (header->vp9().gof_idx != kNoGofIdx) {
_codecSpecificInfo.codecSpecific.VP9.gof_idx = header->vp9().gof_idx;
}
if (header->codecHeader.VP9.ss_data_available) {
if (header->vp9().ss_data_available) {
_codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
header->codecHeader.VP9.num_spatial_layers;
header->vp9().num_spatial_layers;
_codecSpecificInfo.codecSpecific.VP9
.spatial_layer_resolution_present =
header->codecHeader.VP9.spatial_layer_resolution_present;
if (header->codecHeader.VP9.spatial_layer_resolution_present) {
for (size_t i = 0; i < header->codecHeader.VP9.num_spatial_layers;
++i) {
header->vp9().spatial_layer_resolution_present;
if (header->vp9().spatial_layer_resolution_present) {
for (size_t i = 0; i < header->vp9().num_spatial_layers; ++i) {
_codecSpecificInfo.codecSpecific.VP9.width[i] =
header->codecHeader.VP9.width[i];
header->vp9().width[i];
_codecSpecificInfo.codecSpecific.VP9.height[i] =
header->codecHeader.VP9.height[i];
header->vp9().height[i];
}
}
_codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
header->codecHeader.VP9.gof);
header->vp9().gof);
}
break;
}


@ -36,7 +36,7 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
const uint8_t* data = packet->dataPtr;
const size_t data_size = packet->sizeBytes;
const RTPVideoHeader& video_header = packet->video_header;
RTPVideoHeaderH264* codec_header = &packet->video_header.codecHeader.H264;
RTPVideoHeaderH264* codec_header = &packet->video_header.h264();
bool append_sps_pps = false;
auto sps = sps_data_.end();


@ -53,9 +53,9 @@ class TestH264SpsPpsTracker : public ::testing::Test {
VCMPacket GetDefaultPacket() {
VCMPacket packet;
packet.codec = kVideoCodecH264;
packet.video_header.codecHeader.H264.nalus_length = 0;
packet.video_header.h264().nalus_length = 0;
packet.video_header.is_first_packet_in_frame = false;
packet.video_header.codecHeader.H264.packetization_type = kH264SingleNalu;
packet.video_header.h264().packetization_type = kH264SingleNalu;
return packet;
}
@ -68,8 +68,8 @@ class TestH264SpsPpsTracker : public ::testing::Test {
data->push_back(H264::NaluType::kSps);
data->push_back(sps_id); // The sps data, just a single byte.
packet->video_header.codecHeader.H264
.nalus[packet->video_header.codecHeader.H264.nalus_length++] = info;
packet->video_header.h264()
.nalus[packet->video_header.h264().nalus_length++] = info;
}
void AddPps(VCMPacket* packet,
@ -83,8 +83,8 @@ class TestH264SpsPpsTracker : public ::testing::Test {
data->push_back(H264::NaluType::kPps);
data->push_back(pps_id); // The pps data, just a single byte.
packet->video_header.codecHeader.H264
.nalus[packet->video_header.codecHeader.H264.nalus_length++] = info;
packet->video_header.h264()
.nalus[packet->video_header.h264().nalus_length++] = info;
}
void AddIdr(VCMPacket* packet, int pps_id) {
@ -93,8 +93,8 @@ class TestH264SpsPpsTracker : public ::testing::Test {
info.sps_id = -1;
info.pps_id = pps_id;
packet->video_header.codecHeader.H264
.nalus[packet->video_header.codecHeader.H264.nalus_length++] = info;
packet->video_header.h264()
.nalus[packet->video_header.h264().nalus_length++] = info;
}
protected:
@ -104,7 +104,7 @@ class TestH264SpsPpsTracker : public ::testing::Test {
TEST_F(TestH264SpsPpsTracker, NoNalus) {
uint8_t data[] = {1, 2, 3};
VCMPacket packet = GetDefaultPacket();
packet.video_header.codecHeader.H264.packetization_type = kH264FuA;
packet.video_header.h264().packetization_type = kH264FuA;
packet.dataPtr = data;
packet.sizeBytes = sizeof(data);
@ -116,7 +116,7 @@ TEST_F(TestH264SpsPpsTracker, NoNalus) {
TEST_F(TestH264SpsPpsTracker, FuAFirstPacket) {
uint8_t data[] = {1, 2, 3};
VCMPacket packet = GetDefaultPacket();
packet.video_header.codecHeader.H264.packetization_type = kH264FuA;
packet.video_header.h264().packetization_type = kH264FuA;
packet.video_header.is_first_packet_in_frame = true;
packet.dataPtr = data;
packet.sizeBytes = sizeof(data);
@ -132,7 +132,7 @@ TEST_F(TestH264SpsPpsTracker, FuAFirstPacket) {
TEST_F(TestH264SpsPpsTracker, StapAIncorrectSegmentLength) {
uint8_t data[] = {0, 0, 2, 0};
VCMPacket packet = GetDefaultPacket();
packet.video_header.codecHeader.H264.packetization_type = kH264StapA;
packet.video_header.h264().packetization_type = kH264StapA;
packet.video_header.is_first_packet_in_frame = true;
packet.dataPtr = data;
packet.sizeBytes = sizeof(data);
@ -158,7 +158,7 @@ TEST_F(TestH264SpsPpsTracker, NoNalusFirstPacket) {
TEST_F(TestH264SpsPpsTracker, IdrNoSpsPpsInserted) {
std::vector<uint8_t> data = {1, 2, 3};
VCMPacket packet = GetDefaultPacket();
packet.video_header.codecHeader.H264.packetization_type = kH264FuA;
packet.video_header.h264().packetization_type = kH264FuA;
AddIdr(&packet, 0);
packet.dataPtr = data.data();
@ -244,7 +244,7 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsPacketThenIdrFirstPacket) {
TEST_F(TestH264SpsPpsTracker, SpsPpsIdrInStapA) {
std::vector<uint8_t> data;
VCMPacket packet = GetDefaultPacket();
packet.video_header.codecHeader.H264.packetization_type = kH264StapA;
packet.video_header.h264().packetization_type = kH264StapA;
packet.video_header.is_first_packet_in_frame = true; // Always true for StapA
data.insert(data.end(), {0}); // First byte is ignored
@ -289,13 +289,13 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBand) {
AddIdr(&idr_packet, 0);
idr_packet.dataPtr = kData;
idr_packet.sizeBytes = sizeof(kData);
EXPECT_EQ(1u, idr_packet.video_header.codecHeader.H264.nalus_length);
EXPECT_EQ(1u, idr_packet.video_header.h264().nalus_length);
EXPECT_EQ(H264SpsPpsTracker::kInsert,
tracker_.CopyAndFixBitstream(&idr_packet));
EXPECT_EQ(3u, idr_packet.video_header.codecHeader.H264.nalus_length);
EXPECT_EQ(3u, idr_packet.video_header.h264().nalus_length);
EXPECT_EQ(320, idr_packet.width);
EXPECT_EQ(240, idr_packet.height);
ExpectSpsPpsIdr(idr_packet.video_header.codecHeader.H264, 0, 0);
ExpectSpsPpsIdr(idr_packet.video_header.h264(), 0, 0);
if (idr_packet.dataPtr != kData) {
// In case CopyAndFixBitStream() prepends SPS/PPS nalus to the packet, it


@ -124,10 +124,10 @@ void FrameList::Reset(UnorderedFrameList* free_frames) {
}
bool Vp9SsMap::Insert(const VCMPacket& packet) {
if (!packet.video_header.codecHeader.VP9.ss_data_available)
if (!packet.video_header.vp9().ss_data_available)
return false;
ss_map_[packet.timestamp] = packet.video_header.codecHeader.VP9.gof;
ss_map_[packet.timestamp] = packet.video_header.vp9().gof;
return true;
}
@ -175,7 +175,7 @@ void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
// TODO(asapersson): Update according to updates in RTP payload profile.
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
uint8_t gof_idx = packet->video_header.codecHeader.VP9.gof_idx;
uint8_t gof_idx = packet->video_header.vp9().gof_idx;
if (gof_idx == kNoGofIdx)
return false; // No update needed.
@ -186,7 +186,7 @@ bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
if (gof_idx >= it->second.num_frames_in_gof)
return false; // Assume corresponding SS not yet received.
RTPVideoHeaderVP9* vp9 = &packet->video_header.codecHeader.VP9;
RTPVideoHeaderVP9* vp9 = &packet->video_header.vp9();
vp9->temporal_idx = it->second.temporal_idx[gof_idx];
vp9->temporal_up_switch = it->second.temporal_up_switch[gof_idx];


@ -50,12 +50,12 @@ class Vp9SsMapTest : public ::testing::Test {
packet_.frameType = kVideoFrameKey;
packet_.codec = kVideoCodecVP9;
packet_.video_header.codec = kVideoCodecVP9;
packet_.video_header.codecHeader.VP9.flexible_mode = false;
packet_.video_header.codecHeader.VP9.gof_idx = 0;
packet_.video_header.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
packet_.video_header.codecHeader.VP9.temporal_up_switch = false;
packet_.video_header.codecHeader.VP9.ss_data_available = true;
packet_.video_header.codecHeader.VP9.gof.SetGofInfoVP9(
packet_.video_header.vp9().flexible_mode = false;
packet_.video_header.vp9().gof_idx = 0;
packet_.video_header.vp9().temporal_idx = kNoTemporalIdx;
packet_.video_header.vp9().temporal_up_switch = false;
packet_.video_header.vp9().ss_data_available = true;
packet_.video_header.vp9().gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
}
@ -69,7 +69,7 @@ TEST_F(Vp9SsMapTest, Insert) {
}
TEST_F(Vp9SsMapTest, Insert_NoSsData) {
packet_.video_header.codecHeader.VP9.ss_data_available = false;
packet_.video_header.vp9().ss_data_available = false;
EXPECT_FALSE(map_.Insert(packet_));
}
@ -146,52 +146,52 @@ TEST_F(Vp9SsMapTest, RemoveOld_WithWrap) {
}
TEST_F(Vp9SsMapTest, UpdatePacket_NoSsData) {
packet_.video_header.codecHeader.VP9.gof_idx = 0;
packet_.video_header.vp9().gof_idx = 0;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket_NoGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
packet_.video_header.codecHeader.VP9.gof_idx = kNoGofIdx;
packet_.video_header.vp9().gof_idx = kNoGofIdx;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket_InvalidGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
packet_.video_header.codecHeader.VP9.gof_idx = 4;
packet_.video_header.vp9().gof_idx = 4;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket) {
EXPECT_TRUE(map_.Insert(packet_)); // kTemporalStructureMode3: 0-2-1-2..
packet_.video_header.codecHeader.VP9.gof_idx = 0;
packet_.video_header.vp9().gof_idx = 0;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(0, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_FALSE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(4, packet_.video_header.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(0, packet_.video_header.vp9().temporal_idx);
EXPECT_FALSE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(4, packet_.video_header.vp9().pid_diff[0]);
packet_.video_header.codecHeader.VP9.gof_idx = 1;
packet_.video_header.vp9().gof_idx = 1;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_TRUE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(2, packet_.video_header.vp9().temporal_idx);
EXPECT_TRUE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(1, packet_.video_header.vp9().pid_diff[0]);
packet_.video_header.codecHeader.VP9.gof_idx = 2;
packet_.video_header.vp9().gof_idx = 2;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_TRUE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(1, packet_.video_header.vp9().temporal_idx);
EXPECT_TRUE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(2, packet_.video_header.vp9().pid_diff[0]);
packet_.video_header.codecHeader.VP9.gof_idx = 3;
packet_.video_header.vp9().gof_idx = 3;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.temporal_idx);
EXPECT_TRUE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.pid_diff[0]);
EXPECT_EQ(2, packet_.video_header.vp9().temporal_idx);
EXPECT_TRUE(packet_.video_header.vp9().temporal_up_switch);
EXPECT_EQ(1U, packet_.video_header.vp9().num_ref_pics);
EXPECT_EQ(1, packet_.video_header.vp9().pid_diff[0]);
}
class TestBasicJitterBuffer : public ::testing::TestWithParam<std::string>,
@ -924,20 +924,20 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->video_header.codec = kVideoCodecVP9;
packet_->is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->video_header.codecHeader.VP9.flexible_mode = false;
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
packet_->video_header.codecHeader.VP9.end_of_frame = true;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.vp9().flexible_mode = false;
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().beginning_of_frame = true;
packet_->video_header.vp9().end_of_frame = true;
packet_->video_header.vp9().temporal_up_switch = false;
packet_->seqNum = 65485;
packet_->timestamp = 1000;
packet_->frameType = kVideoFrameKey;
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.ss_data_available = true;
packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().tl0_pic_idx = 200;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().ss_data_available = true;
packet_->video_header.vp9().gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@ -945,10 +945,10 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->seqNum = 65489;
packet_->timestamp = 13000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.codecHeader.VP9.picture_id = 9;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 201;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.ss_data_available = false;
packet_->video_header.vp9().picture_id = 9;
packet_->video_header.vp9().tl0_pic_idx = 201;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().ss_data_available = false;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@ -977,26 +977,26 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->video_header.codec = kVideoCodecVP9;
packet_->is_first_packet_in_frame = true;
packet_->markerBit = true;
packet_->video_header.codecHeader.VP9.flexible_mode = false;
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
packet_->video_header.codecHeader.VP9.end_of_frame = true;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->video_header.vp9().flexible_mode = false;
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().beginning_of_frame = true;
packet_->video_header.vp9().end_of_frame = true;
packet_->video_header.vp9().tl0_pic_idx = 200;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.codecHeader.VP9.picture_id = 6;
packet_->video_header.codecHeader.VP9.temporal_idx = 2;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.vp9().picture_id = 6;
packet_->video_header.vp9().temporal_idx = 2;
packet_->video_header.vp9().temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->seqNum = 65487;
packet_->timestamp = 9000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.codecHeader.VP9.picture_id = 7;
packet_->video_header.codecHeader.VP9.temporal_idx = 1;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.vp9().picture_id = 7;
packet_->video_header.vp9().temporal_idx = 1;
packet_->video_header.vp9().temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@ -1005,11 +1005,11 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.codecHeader.VP9.ss_data_available = true;
packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().temporal_up_switch = false;
packet_->video_header.vp9().ss_data_available = true;
packet_->video_header.vp9().gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@ -1051,30 +1051,30 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
bool re = false;
packet_->codec = kVideoCodecVP9;
packet_->video_header.codec = kVideoCodecVP9;
packet_->video_header.codecHeader.VP9.flexible_mode = false;
packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
packet_->video_header.codecHeader.VP9.end_of_frame = true;
packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->video_header.vp9().flexible_mode = false;
packet_->video_header.vp9().beginning_of_frame = true;
packet_->video_header.vp9().end_of_frame = true;
packet_->video_header.vp9().tl0_pic_idx = 200;
packet_->is_first_packet_in_frame = true;
packet_->markerBit = false;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.picture_id = 6;
packet_->video_header.codecHeader.VP9.temporal_idx = 1;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().picture_id = 6;
packet_->video_header.vp9().temporal_idx = 1;
packet_->video_header.vp9().temporal_up_switch = true;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->is_first_packet_in_frame = false;
packet_->markerBit = true;
packet_->seqNum = 65487;
packet_->frameType = kVideoFrameDelta;
packet_->video_header.codecHeader.VP9.spatial_idx = 1;
packet_->video_header.codecHeader.VP9.picture_id = 6;
packet_->video_header.codecHeader.VP9.temporal_idx = 1;
packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
packet_->video_header.vp9().spatial_idx = 1;
packet_->video_header.vp9().picture_id = 6;
packet_->video_header.vp9().temporal_idx = 1;
packet_->video_header.vp9().temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->is_first_packet_in_frame = false;
@ -1082,10 +1082,10 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->seqNum = 65485;
packet_->timestamp = 3000;
packet_->frameType = kVideoFrameKey;
packet_->video_header.codecHeader.VP9.spatial_idx = 1;
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.vp9().spatial_idx = 1;
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().temporal_up_switch = false;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@ -1095,12 +1095,12 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
packet_->video_header.codecHeader.VP9.spatial_idx = 0;
packet_->video_header.codecHeader.VP9.picture_id = 5;
packet_->video_header.codecHeader.VP9.temporal_idx = 0;
packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->video_header.codecHeader.VP9.ss_data_available = true;
packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
packet_->video_header.vp9().spatial_idx = 0;
packet_->video_header.vp9().picture_id = 5;
packet_->video_header.vp9().temporal_idx = 0;
packet_->video_header.vp9().temporal_up_switch = false;
packet_->video_header.vp9().ss_data_available = true;
packet_->video_header.vp9().gof.SetGofInfoVP9(
kTemporalStructureMode2); // kTemporalStructureMode2: 0-1-0-1..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@ -1160,11 +1160,11 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
packet_->markerBit = true;
packet_->codec = kVideoCodecH264;
packet_->video_header.codec = kVideoCodecH264;
packet_->video_header.codecHeader.H264.nalu_type = H264::NaluType::kIdr;
packet_->video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kIdr;
packet_->video_header.codecHeader.H264.nalus[0].sps_id = -1;
packet_->video_header.codecHeader.H264.nalus[0].pps_id = 0;
packet_->video_header.codecHeader.H264.nalus_length = 1;
packet_->video_header.h264().nalu_type = H264::NaluType::kIdr;
packet_->video_header.h264().nalus[0].type = H264::NaluType::kIdr;
packet_->video_header.h264().nalus[0].sps_id = -1;
packet_->video_header.h264().nalus[0].pps_id = 0;
packet_->video_header.h264().nalus_length = 1;
bool retransmitted = false;
EXPECT_EQ(kCompleteSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
@ -1180,14 +1180,14 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
packet_->markerBit = false;
packet_->codec = kVideoCodecH264;
packet_->video_header.codec = kVideoCodecH264;
packet_->video_header.codecHeader.H264.nalu_type = H264::NaluType::kStapA;
packet_->video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kSps;
packet_->video_header.codecHeader.H264.nalus[0].sps_id = 0;
packet_->video_header.codecHeader.H264.nalus[0].pps_id = -1;
packet_->video_header.codecHeader.H264.nalus[1].type = H264::NaluType::kPps;
packet_->video_header.codecHeader.H264.nalus[1].sps_id = 0;
packet_->video_header.codecHeader.H264.nalus[1].pps_id = 0;
packet_->video_header.codecHeader.H264.nalus_length = 2;
packet_->video_header.h264().nalu_type = H264::NaluType::kStapA;
packet_->video_header.h264().nalus[0].type = H264::NaluType::kSps;
packet_->video_header.h264().nalus[0].sps_id = 0;
packet_->video_header.h264().nalus[0].pps_id = -1;
packet_->video_header.h264().nalus[1].type = H264::NaluType::kPps;
packet_->video_header.h264().nalus[1].sps_id = 0;
packet_->video_header.h264().nalus[1].pps_id = 0;
packet_->video_header.h264().nalus_length = 2;
// Not complete since the marker bit hasn't been received.
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
@ -1199,11 +1199,11 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
packet_->markerBit = true;
packet_->codec = kVideoCodecH264;
packet_->video_header.codec = kVideoCodecH264;
packet_->video_header.codecHeader.H264.nalu_type = H264::NaluType::kIdr;
packet_->video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kIdr;
packet_->video_header.codecHeader.H264.nalus[0].sps_id = -1;
packet_->video_header.codecHeader.H264.nalus[0].pps_id = 0;
packet_->video_header.codecHeader.H264.nalus_length = 1;
packet_->video_header.h264().nalu_type = H264::NaluType::kIdr;
packet_->video_header.h264().nalus[0].type = H264::NaluType::kIdr;
packet_->video_header.h264().nalus[0].sps_id = -1;
packet_->video_header.h264().nalus[0].pps_id = 0;
packet_->video_header.h264().nalus_length = 1;
// Complete and decodable since the pps and sps are received in the first
// packet of this frame.
EXPECT_EQ(kCompleteSession,
@ -1221,11 +1221,11 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
packet_->markerBit = true;
packet_->codec = kVideoCodecH264;
packet_->video_header.codec = kVideoCodecH264;
packet_->video_header.codecHeader.H264.nalu_type = H264::NaluType::kSlice;
packet_->video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kSlice;
packet_->video_header.codecHeader.H264.nalus[0].sps_id = -1;
packet_->video_header.codecHeader.H264.nalus[0].pps_id = 0;
packet_->video_header.codecHeader.H264.nalus_length = 1;
packet_->video_header.h264().nalu_type = H264::NaluType::kSlice;
packet_->video_header.h264().nalus[0].type = H264::NaluType::kSlice;
packet_->video_header.h264().nalus[0].sps_id = -1;
packet_->video_header.h264().nalus[0].pps_id = 0;
packet_->video_header.h264().nalus_length = 1;
// Complete and decodable since sps, pps and key frame have been received.
EXPECT_EQ(kCompleteSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
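
All of the rewrites in this file are mechanical, but they are what makes the later storage change cheap: once every caller goes through vp8()/vp9()/h264(), only the accessor bodies need to change when the union is replaced. A hedged sketch of where the migration could end up — the variant-backed layout below is an assumption about the follow-up, not part of this change:

#include "absl/types/variant.h"

// Sketch only: stand-in payload structs, hypothetical post-migration type.
struct Vp8Sketch { int pictureId = -1; };
struct Vp9Sketch { int picture_id = -1; };

struct VideoHeaderSketch {
  // absl::get checks the active alternative at runtime, making explicit
  // the contract that union access silently assumed.
  Vp8Sketch& vp8() { return absl::get<Vp8Sketch>(codec_header); }
  Vp9Sketch& vp9() { return absl::get<Vp9Sketch>(codec_header); }

  absl::variant<Vp8Sketch, Vp9Sketch> codec_header;
};
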


@ -304,7 +304,7 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
if (is_h264 && !is_h264_keyframe) {
const RTPVideoHeaderH264& header =
data_buffer_[start_index].video_header.codecHeader.H264;
data_buffer_[start_index].video_header.h264();
if (header.nalus_length >= kMaxNalusPerPacket)
return found_frames;
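
The early return above exists because the H264 header keeps its NALUs in a fixed-size array; a packet reporting more than kMaxNalusPerPacket entries would otherwise be indexed past the end. A standalone sketch of the write-side invariant (names and the capacity value are assumptions):

#include <cstddef>

constexpr size_t kMaxNalusPerPacketSketch = 10;  // assumed capacity

struct NaluSketch { int type; int sps_id; int pps_id; };

struct H264Sketch {
  NaluSketch nalus[kMaxNalusPerPacketSketch];
  size_t nalus_length = 0;
};

// Returns false when the array is full; the caller must then abandon
// the frame, as FindFrames does by returning the frames found so far.
bool AppendNalu(H264Sketch* h, const NaluSketch& info) {
  if (h->nalus_length >= kMaxNalusPerPacketSketch)
    return false;
  h->nalus[h->nalus_length++] = info;
  return true;
}
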


@ -108,10 +108,10 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
packet.seqNum = seq_num_start;
packet.markerBit = (seq_num_start == seq_num_end);
packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
packet.video_header.codecHeader.VP8.pictureId = pid % (1 << 15);
packet.video_header.codecHeader.VP8.temporalIdx = tid;
packet.video_header.codecHeader.VP8.tl0PicIdx = tl0;
packet.video_header.codecHeader.VP8.layerSync = sync;
packet.video_header.vp8().pictureId = pid % (1 << 15);
packet.video_header.vp8().temporalIdx = tid;
packet.video_header.vp8().tl0PicIdx = tl0;
packet.video_header.vp8().layerSync = sync;
ref_packet_buffer_->InsertPacket(&packet);
if (seq_num_start != seq_num_end) {
@ -140,21 +140,21 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
packet.seqNum = seq_num_start;
packet.markerBit = (seq_num_start == seq_num_end);
packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
packet.video_header.codecHeader.VP9.flexible_mode = false;
packet.video_header.codecHeader.VP9.picture_id = pid % (1 << 15);
packet.video_header.codecHeader.VP9.temporal_idx = tid;
packet.video_header.codecHeader.VP9.spatial_idx = sid;
packet.video_header.codecHeader.VP9.tl0_pic_idx = tl0;
packet.video_header.codecHeader.VP9.temporal_up_switch = up_switch;
packet.video_header.vp9().flexible_mode = false;
packet.video_header.vp9().picture_id = pid % (1 << 15);
packet.video_header.vp9().temporal_idx = tid;
packet.video_header.vp9().spatial_idx = sid;
packet.video_header.vp9().tl0_pic_idx = tl0;
packet.video_header.vp9().temporal_up_switch = up_switch;
if (ss != nullptr) {
packet.video_header.codecHeader.VP9.ss_data_available = true;
packet.video_header.codecHeader.VP9.gof = *ss;
packet.video_header.vp9().ss_data_available = true;
packet.video_header.vp9().gof = *ss;
}
ref_packet_buffer_->InsertPacket(&packet);
if (seq_num_start != seq_num_end) {
packet.markerBit = true;
packet.video_header.codecHeader.VP9.ss_data_available = false;
packet.video_header.vp9().ss_data_available = false;
packet.seqNum = seq_num_end;
ref_packet_buffer_->InsertPacket(&packet);
}
@ -179,15 +179,15 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
packet.seqNum = seq_num_start;
packet.markerBit = (seq_num_start == seq_num_end);
packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
packet.video_header.codecHeader.VP9.inter_layer_predicted = inter;
packet.video_header.codecHeader.VP9.flexible_mode = true;
packet.video_header.codecHeader.VP9.picture_id = pid % (1 << 15);
packet.video_header.codecHeader.VP9.temporal_idx = tid;
packet.video_header.codecHeader.VP9.spatial_idx = sid;
packet.video_header.codecHeader.VP9.tl0_pic_idx = tl0;
packet.video_header.codecHeader.VP9.num_ref_pics = refs.size();
packet.video_header.vp9().inter_layer_predicted = inter;
packet.video_header.vp9().flexible_mode = true;
packet.video_header.vp9().picture_id = pid % (1 << 15);
packet.video_header.vp9().temporal_idx = tid;
packet.video_header.vp9().spatial_idx = sid;
packet.video_header.vp9().tl0_pic_idx = tl0;
packet.video_header.vp9().num_ref_pics = refs.size();
for (size_t i = 0; i < refs.size(); ++i)
packet.video_header.codecHeader.VP9.pid_diff[i] = refs[i];
packet.video_header.vp9().pid_diff[i] = refs[i];
ref_packet_buffer_->InsertPacket(&packet);
if (seq_num_start != seq_num_end) {
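
In flexible mode the helper signals references as picture-id diffs instead of relying on the GOF. Recovering the absolute references is a 15-bit modular subtraction, mirroring the pid % (1 << 15) wrap used when the tests assign picture ids; a standalone sketch (the function is illustrative, not WebRTC API):

#include <cstdint>
#include <vector>

std::vector<uint16_t> ReferencedPictureIds(
    uint16_t picture_id, const std::vector<uint8_t>& pid_diff) {
  std::vector<uint16_t> refs;
  for (uint8_t diff : pid_diff) {
    // Subtract modulo 2^15 so references survive picture-id wraparound.
    refs.push_back(static_cast<uint16_t>((picture_id - diff) & 0x7FFF));
  }
  return refs;
}
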


@ -60,9 +60,9 @@ int VCMSessionInfo::PictureId() const {
if (packets_.empty())
return kNoPictureId;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.codecHeader.VP8.pictureId;
return packets_.front().video_header.vp8().pictureId;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.codecHeader.VP9.picture_id;
return packets_.front().video_header.vp9().picture_id;
} else {
return kNoPictureId;
}
@ -72,9 +72,9 @@ int VCMSessionInfo::TemporalId() const {
if (packets_.empty())
return kNoTemporalIdx;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.codecHeader.VP8.temporalIdx;
return packets_.front().video_header.vp8().temporalIdx;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.codecHeader.VP9.temporal_idx;
return packets_.front().video_header.vp9().temporal_idx;
} else {
return kNoTemporalIdx;
}
@ -84,9 +84,9 @@ bool VCMSessionInfo::LayerSync() const {
if (packets_.empty())
return false;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.codecHeader.VP8.layerSync;
return packets_.front().video_header.vp8().layerSync;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.codecHeader.VP9.temporal_up_switch;
return packets_.front().video_header.vp9().temporal_up_switch;
} else {
return false;
}
@ -96,9 +96,9 @@ int VCMSessionInfo::Tl0PicId() const {
if (packets_.empty())
return kNoTl0PicIdx;
if (packets_.front().video_header.codec == kVideoCodecVP8) {
return packets_.front().video_header.codecHeader.VP8.tl0PicIdx;
return packets_.front().video_header.vp8().tl0PicIdx;
} else if (packets_.front().video_header.codec == kVideoCodecVP9) {
return packets_.front().video_header.codecHeader.VP9.tl0_pic_idx;
return packets_.front().video_header.vp9().tl0_pic_idx;
} else {
return kNoTl0PicIdx;
}
@ -110,9 +110,8 @@ std::vector<NaluInfo> VCMSessionInfo::GetNaluInfos() const {
return std::vector<NaluInfo>();
std::vector<NaluInfo> nalu_infos;
for (const VCMPacket& packet : packets_) {
for (size_t i = 0; i < packet.video_header.codecHeader.H264.nalus_length;
++i) {
nalu_infos.push_back(packet.video_header.codecHeader.H264.nalus[i]);
for (size_t i = 0; i < packet.video_header.h264().nalus_length; ++i) {
nalu_infos.push_back(packet.video_header.h264().nalus[i]);
}
}
return nalu_infos;
@ -121,18 +120,15 @@ std::vector<NaluInfo> VCMSessionInfo::GetNaluInfos() const {
void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
if (packets_.empty() ||
packets_.front().video_header.codec != kVideoCodecVP9 ||
packets_.front().video_header.codecHeader.VP9.flexible_mode) {
packets_.front().video_header.vp9().flexible_mode) {
return;
}
packets_.front().video_header.codecHeader.VP9.temporal_idx =
gof_info.temporal_idx[idx];
packets_.front().video_header.codecHeader.VP9.temporal_up_switch =
packets_.front().video_header.vp9().temporal_idx = gof_info.temporal_idx[idx];
packets_.front().video_header.vp9().temporal_up_switch =
gof_info.temporal_up_switch[idx];
packets_.front().video_header.codecHeader.VP9.num_ref_pics =
gof_info.num_ref_pics[idx];
packets_.front().video_header.vp9().num_ref_pics = gof_info.num_ref_pics[idx];
for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
packets_.front().video_header.codecHeader.VP9.pid_diff[i] =
gof_info.pid_diff[idx][i];
packets_.front().video_header.vp9().pid_diff[i] = gof_info.pid_diff[idx][i];
}
}
@ -180,7 +176,7 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
const size_t kH264NALHeaderLengthInBytes = 1;
const size_t kLengthFieldLength = 2;
if (packet.video_header.codec == kVideoCodecH264 &&
packet.video_header.codecHeader.H264.packetization_type == kH264StapA) {
packet.video_header.h264().packetization_type == kH264StapA) {
size_t required_length = 0;
const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
@ -336,7 +332,7 @@ size_t VCMSessionInfo::DeletePacketData(PacketIterator start,
VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
PacketIterator it) const {
while (it != packets_.end()) {
if ((*it).video_header.codecHeader.VP8.beginningOfPartition) {
if ((*it).video_header.vp8().beginningOfPartition) {
return it;
}
++it;
@ -348,10 +344,10 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd(
PacketIterator it) const {
assert((*it).codec == kVideoCodecVP8);
PacketIterator prev_it = it;
const int partition_id = (*it).video_header.codecHeader.VP8.partitionId;
const int partition_id = (*it).video_header.vp8().partitionId;
while (it != packets_.end()) {
bool beginning = (*it).video_header.codecHeader.VP8.beginningOfPartition;
int current_partition_id = (*it).video_header.codecHeader.VP8.partitionId;
bool beginning = (*it).video_header.vp8().beginningOfPartition;
int current_partition_id = (*it).video_header.vp8().partitionId;
bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
if (packet_loss_found ||
(beginning && current_partition_id != partition_id)) {
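
FindPartitionEnd scans forward until it hits either a sequence-number gap inside the partition or the first packet of a different partition. The same logic over a plain vector, as a compact sketch (PacketSketch and the index-based return convention are illustrative, not the VCMSessionInfo API):

#include <cstddef>
#include <cstdint>
#include <vector>

struct PacketSketch {
  uint16_t seq_num;
  bool beginning_of_partition;
  int partition_id;
};

// Returns the index one past the last packet of the partition starting
// at |first|.
size_t PartitionEnd(const std::vector<PacketSketch>& pkts, size_t first) {
  const int id = pkts[first].partition_id;
  for (size_t i = first + 1; i < pkts.size(); ++i) {
    const bool in_sequence =
        static_cast<uint16_t>(pkts[i].seq_num - pkts[i - 1].seq_num) == 1;
    const bool beginning = pkts[i].beginning_of_partition;
    if ((!beginning && !in_sequence) ||
        (beginning && pkts[i].partition_id != id))
      return i;  // loss mid-partition, or a new partition begins
  }
  return pkts.size();
}
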


@ -512,17 +512,13 @@ class TestPacketBufferH264 : public TestPacketBuffer {
packet.timestamp = timestamp;
if (keyframe == kKeyFrame) {
if (sps_pps_idr_is_keyframe_) {
packet.video_header.codecHeader.H264.nalus[0].type =
H264::NaluType::kSps;
packet.video_header.codecHeader.H264.nalus[1].type =
H264::NaluType::kPps;
packet.video_header.codecHeader.H264.nalus[2].type =
H264::NaluType::kIdr;
packet.video_header.codecHeader.H264.nalus_length = 3;
packet.video_header.h264().nalus[0].type = H264::NaluType::kSps;
packet.video_header.h264().nalus[1].type = H264::NaluType::kPps;
packet.video_header.h264().nalus[2].type = H264::NaluType::kIdr;
packet.video_header.h264().nalus_length = 3;
} else {
packet.video_header.codecHeader.H264.nalus[0].type =
H264::NaluType::kIdr;
packet.video_header.codecHeader.H264.nalus_length = 1;
packet.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
packet.video_header.h264().nalus_length = 1;
}
}
packet.is_first_packet_in_frame = first == kFirst;
@ -596,12 +592,12 @@ TEST_P(TestPacketBufferH264Parameterized, GetBitstreamBufferPadding) {
new uint8_t[sizeof(data_data) + EncodedImage::kBufferPaddingBytesH264]);
VCMPacket packet;
packet.video_header.codecHeader.H264.nalus_length = 1;
packet.video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kIdr;
packet.video_header.h264().nalus_length = 1;
packet.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
packet.seqNum = seq_num;
packet.codec = kVideoCodecH264;
packet.insertStartCode = true;
packet.video_header.codecHeader.H264.packetization_type = kH264SingleNalu;
packet.video_header.h264().packetization_type = kH264SingleNalu;
packet.dataPtr = data;
packet.sizeBytes = sizeof(data_data);
packet.is_first_packet_in_frame = true;
@ -759,7 +755,7 @@ TEST_F(TestPacketBuffer, IncomingCodecChange) {
EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
packet.codec = kVideoCodecH264;
packet.video_header.codecHeader.H264.nalus_length = 1;
packet.video_header.h264().nalus_length = 1;
packet.timestamp = 3;
packet.seqNum = 3;
EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
@ -782,7 +778,7 @@ TEST_F(TestPacketBuffer, TooManyNalusInPacket) {
packet.frameType = kVideoFrameKey;
packet.is_first_packet_in_frame = true;
packet.markerBit = true;
packet.video_header.codecHeader.H264.nalus_length = kMaxNalusPerPacket;
packet.video_header.h264().nalus_length = kMaxNalusPerPacket;
packet.sizeBytes = 0;
packet.dataPtr = nullptr;
EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
@ -877,8 +873,8 @@ class TestPacketBufferH264IdrIsKeyframe
};
TEST_F(TestPacketBufferH264IdrIsKeyframe, IdrIsKeyframe) {
packet_.video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kIdr;
packet_.video_header.codecHeader.H264.nalus_length = 1;
packet_.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
packet_.video_header.h264().nalus_length = 1;
packet_buffer_->InsertPacket(&packet_);
@ -887,10 +883,10 @@ TEST_F(TestPacketBufferH264IdrIsKeyframe, IdrIsKeyframe) {
}
TEST_F(TestPacketBufferH264IdrIsKeyframe, SpsPpsIdrIsKeyframe) {
packet_.video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kSps;
packet_.video_header.codecHeader.H264.nalus[1].type = H264::NaluType::kPps;
packet_.video_header.codecHeader.H264.nalus[2].type = H264::NaluType::kIdr;
packet_.video_header.codecHeader.H264.nalus_length = 3;
packet_.video_header.h264().nalus[0].type = H264::NaluType::kSps;
packet_.video_header.h264().nalus[1].type = H264::NaluType::kPps;
packet_.video_header.h264().nalus[2].type = H264::NaluType::kIdr;
packet_.video_header.h264().nalus_length = 3;
packet_buffer_->InsertPacket(&packet_);
@ -906,8 +902,8 @@ class TestPacketBufferH264SpsPpsIdrIsKeyframe
};
TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, IdrIsNotKeyframe) {
packet_.video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kIdr;
packet_.video_header.codecHeader.H264.nalus_length = 1;
packet_.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
packet_.video_header.h264().nalus_length = 1;
packet_buffer_->InsertPacket(&packet_);
@ -916,9 +912,9 @@ TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, IdrIsNotKeyframe) {
}
TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIsNotKeyframe) {
packet_.video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kSps;
packet_.video_header.codecHeader.H264.nalus[1].type = H264::NaluType::kPps;
packet_.video_header.codecHeader.H264.nalus_length = 2;
packet_.video_header.h264().nalus[0].type = H264::NaluType::kSps;
packet_.video_header.h264().nalus[1].type = H264::NaluType::kPps;
packet_.video_header.h264().nalus_length = 2;
packet_buffer_->InsertPacket(&packet_);
@ -927,10 +923,10 @@ TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIsNotKeyframe) {
}
TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIdrIsKeyframe) {
packet_.video_header.codecHeader.H264.nalus[0].type = H264::NaluType::kSps;
packet_.video_header.codecHeader.H264.nalus[1].type = H264::NaluType::kPps;
packet_.video_header.codecHeader.H264.nalus[2].type = H264::NaluType::kIdr;
packet_.video_header.codecHeader.H264.nalus_length = 3;
packet_.video_header.h264().nalus[0].type = H264::NaluType::kSps;
packet_.video_header.h264().nalus[1].type = H264::NaluType::kPps;
packet_.video_header.h264().nalus[2].type = H264::NaluType::kIdr;
packet_.video_header.h264().nalus_length = 3;
packet_buffer_->InsertPacket(&packet_);
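
These four tests pin down the two keyframe policies: by default an IDR alone marks a keyframe, while with sps_pps_idr_is_keyframe the SPS and PPS must arrive in the same packet as the IDR. A sketch of the predicate being exercised (names are illustrative; the real check lives inside the packet buffer):

#include <cstddef>

enum class NaluKindSketch { kSps, kPps, kIdr, kOther };

bool IsH264KeyframeSketch(const NaluKindSketch* nalus, size_t length,
                          bool sps_pps_idr_is_keyframe) {
  bool has_sps = false, has_pps = false, has_idr = false;
  for (size_t i = 0; i < length; ++i) {
    has_sps = has_sps || nalus[i] == NaluKindSketch::kSps;
    has_pps = has_pps || nalus[i] == NaluKindSketch::kPps;
    has_idr = has_idr || nalus[i] == NaluKindSketch::kIdr;
  }
  // Strict policy: SPS + PPS + IDR in one packet. Default: IDR alone.
  return sps_pps_idr_is_keyframe ? (has_sps && has_pps && has_idr)
                                 : has_idr;
}
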


@ -183,8 +183,8 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
header.header.ssrc = 1;
header.header.headerLength = 12;
header.video_header().codec = kVideoCodecVP8;
header.video_header().codecHeader.VP8.pictureId = -1;
header.video_header().codecHeader.VP8.tl0PicIdx = -1;
header.video_header().vp8().pictureId = -1;
header.video_header().vp8().tl0PicIdx = -1;
for (int i = 0; i < 3; ++i) {
// Insert 2 video frames.
for (int j = 0; j < 2; ++j) {


@ -136,23 +136,23 @@ bool LayerFilteringTransport::SendRtp(const uint8_t* packet,
RtpDepacketizer::ParsedPayload parsed_payload;
if (depacketizer->Parse(&parsed_payload, payload, payload_data_length)) {
const int temporal_idx = static_cast<int>(
is_vp8 ? parsed_payload.video_header().codecHeader.VP8.temporalIdx
: parsed_payload.video_header().codecHeader.VP9.temporal_idx);
is_vp8 ? parsed_payload.video_header().vp8().temporalIdx
: parsed_payload.video_header().vp9().temporal_idx);
const int spatial_idx = static_cast<int>(
is_vp8 ? kNoSpatialIdx
: parsed_payload.video_header().codecHeader.VP9.spatial_idx);
: parsed_payload.video_header().vp9().spatial_idx);
const bool non_ref_for_inter_layer_pred =
is_vp8 ? false
: parsed_payload.video_header()
.codecHeader.VP9.non_ref_for_inter_layer_pred;
.vp9()
.non_ref_for_inter_layer_pred;
// The number of spatial layers is sent in ssData, which is included only
// in the first packet of the first spatial layer of a key frame.
if (!parsed_payload.video_header().codecHeader.VP9.inter_pic_predicted &&
parsed_payload.video_header().codecHeader.VP9.beginning_of_frame ==
1 &&
if (!parsed_payload.video_header().vp9().inter_pic_predicted &&
parsed_payload.video_header().vp9().beginning_of_frame == 1 &&
spatial_idx == 0) {
num_active_spatial_layers_ =
parsed_payload.video_header().codecHeader.VP9.num_spatial_layers;
parsed_payload.video_header().vp9().num_spatial_layers;
} else if (spatial_idx == kNoSpatialIdx)
num_active_spatial_layers_ = 1;
RTC_CHECK_GT(num_active_spatial_layers_, 0);
@ -160,7 +160,7 @@ bool LayerFilteringTransport::SendRtp(const uint8_t* packet,
if (selected_sl_ >= 0 &&
spatial_idx ==
std::min(num_active_spatial_layers_ - 1, selected_sl_) &&
parsed_payload.video_header().codecHeader.VP9.end_of_frame) {
parsed_payload.video_header().vp9().end_of_frame) {
// This layer is now the last in the superframe.
set_marker_bit = true;
} else {
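
When the transport drops the true last spatial layer, the surviving layer's final packet has to take over the RTP marker bit, which is why set_marker_bit is tracked above. For reference, flipping the bit in the fixed header is a one-byte operation (the helper below is a sketch, not the transport's code):

#include <cstdint>

// Set or clear the RTP marker bit in place. Per RFC 3550 section 5.1,
// the M bit is the most significant bit of the second byte of the
// fixed header.
void SetMarkerBit(uint8_t* rtp_header, bool marker) {
  if (marker)
    rtp_header[1] |= 0x80;
  else
    rtp_header[1] &= 0x7F;
}
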


@ -26,57 +26,54 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
rtp->codec = info->codecType;
switch (info->codecType) {
case kVideoCodecVP8: {
rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
rtp->vp8().InitRTPVideoHeaderVP8();
rtp->vp8().nonReference = info->codecSpecific.VP8.nonReference;
rtp->vp8().temporalIdx = info->codecSpecific.VP8.temporalIdx;
rtp->vp8().layerSync = info->codecSpecific.VP8.layerSync;
rtp->vp8().keyIdx = info->codecSpecific.VP8.keyIdx;
rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
return;
}
case kVideoCodecVP9: {
rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
rtp->codecHeader.VP9.inter_pic_predicted =
rtp->vp9().InitRTPVideoHeaderVP9();
rtp->vp9().inter_pic_predicted =
info->codecSpecific.VP9.inter_pic_predicted;
rtp->codecHeader.VP9.flexible_mode =
info->codecSpecific.VP9.flexible_mode;
rtp->codecHeader.VP9.ss_data_available =
info->codecSpecific.VP9.ss_data_available;
rtp->codecHeader.VP9.non_ref_for_inter_layer_pred =
rtp->vp9().flexible_mode = info->codecSpecific.VP9.flexible_mode;
rtp->vp9().ss_data_available = info->codecSpecific.VP9.ss_data_available;
rtp->vp9().non_ref_for_inter_layer_pred =
info->codecSpecific.VP9.non_ref_for_inter_layer_pred;
rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
rtp->codecHeader.VP9.temporal_up_switch =
rtp->vp9().temporal_idx = info->codecSpecific.VP9.temporal_idx;
rtp->vp9().spatial_idx = info->codecSpecific.VP9.spatial_idx;
rtp->vp9().temporal_up_switch =
info->codecSpecific.VP9.temporal_up_switch;
rtp->codecHeader.VP9.inter_layer_predicted =
rtp->vp9().inter_layer_predicted =
info->codecSpecific.VP9.inter_layer_predicted;
rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
rtp->codecHeader.VP9.num_spatial_layers =
rtp->vp9().gof_idx = info->codecSpecific.VP9.gof_idx;
rtp->vp9().num_spatial_layers =
info->codecSpecific.VP9.num_spatial_layers;
if (info->codecSpecific.VP9.ss_data_available) {
rtp->codecHeader.VP9.spatial_layer_resolution_present =
rtp->vp9().spatial_layer_resolution_present =
info->codecSpecific.VP9.spatial_layer_resolution_present;
if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
++i) {
rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
rtp->vp9().width[i] = info->codecSpecific.VP9.width[i];
rtp->vp9().height[i] = info->codecSpecific.VP9.height[i];
}
}
rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
rtp->vp9().gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
}
rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
rtp->vp9().num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i) {
rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
rtp->vp9().pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
}
rtp->codecHeader.VP9.end_of_picture =
info->codecSpecific.VP9.end_of_picture;
rtp->vp9().end_of_picture = info->codecSpecific.VP9.end_of_picture;
return;
}
case kVideoCodecH264:
rtp->codecHeader.H264.packetization_mode =
rtp->h264().packetization_mode =
info->codecSpecific.H264.packetization_mode;
rtp->simulcastIdx = info->codecSpecific.H264.simulcast_idx;
return;
@ -130,30 +127,29 @@ class PayloadRouter::RtpPayloadParams final {
(static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
}
if (rtp_video_header->codec == kVideoCodecVP8) {
rtp_video_header->codecHeader.VP8.pictureId = state_.picture_id;
rtp_video_header->vp8().pictureId = state_.picture_id;
if (rtp_video_header->codecHeader.VP8.temporalIdx != kNoTemporalIdx) {
if (rtp_video_header->codecHeader.VP8.temporalIdx == 0) {
if (rtp_video_header->vp8().temporalIdx != kNoTemporalIdx) {
if (rtp_video_header->vp8().temporalIdx == 0) {
++state_.tl0_pic_idx;
}
rtp_video_header->codecHeader.VP8.tl0PicIdx = state_.tl0_pic_idx;
rtp_video_header->vp8().tl0PicIdx = state_.tl0_pic_idx;
}
}
if (rtp_video_header->codec == kVideoCodecVP9) {
rtp_video_header->codecHeader.VP9.picture_id = state_.picture_id;
rtp_video_header->vp9().picture_id = state_.picture_id;
// Note that in the case that we have no temporal layers but we do have
// spatial layers, packets will carry layering info with a temporal_idx of
// zero, and we then have to set and increment tl0_pic_idx.
if (rtp_video_header->codecHeader.VP9.temporal_idx != kNoTemporalIdx ||
rtp_video_header->codecHeader.VP9.spatial_idx != kNoSpatialIdx) {
if (rtp_video_header->vp9().temporal_idx != kNoTemporalIdx ||
rtp_video_header->vp9().spatial_idx != kNoSpatialIdx) {
if (first_frame_in_picture &&
(rtp_video_header->codecHeader.VP9.temporal_idx == 0 ||
rtp_video_header->codecHeader.VP9.temporal_idx ==
kNoTemporalIdx)) {
(rtp_video_header->vp9().temporal_idx == 0 ||
rtp_video_header->vp9().temporal_idx == kNoTemporalIdx)) {
++state_.tl0_pic_idx;
}
rtp_video_header->codecHeader.VP9.tl0_pic_idx = state_.tl0_pic_idx;
rtp_video_header->vp9().tl0_pic_idx = state_.tl0_pic_idx;
}
}
}
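
RtpPayloadParams carries per-stream state across frames: the picture id advances once per picture with a 15-bit wrap, while tl0_pic_idx advances only when a new base-temporal-layer picture starts, so receivers can detect base-layer losses specifically. A simplified sketch of that bookkeeping (StateSketch stands in for the class's private state):

#include <cstdint>

struct StateSketch {
  uint16_t picture_id = 0;  // wraps at 2^15, as in the code above
  uint8_t tl0_pic_idx = 0;
};

void OnNewPicture(StateSketch* s) {
  s->picture_id = static_cast<uint16_t>(s->picture_id + 1) & 0x7FFF;
}

void OnFrame(StateSketch* s, bool is_base_temporal_layer) {
  // Only temporal-layer-0 frames move the TL0 picture index.
  if (is_base_temporal_layer)
    ++s->tl0_pic_idx;
}
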


@ -334,12 +334,12 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp8) {
EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
EXPECT_EQ(1, header->simulcastIdx);
EXPECT_EQ(kVideoCodecVP8, header->codec);
EXPECT_EQ(kPictureId + 1, header->codecHeader.VP8.pictureId);
EXPECT_EQ(kTemporalIdx, header->codecHeader.VP8.temporalIdx);
EXPECT_EQ(kTl0PicIdx, header->codecHeader.VP8.tl0PicIdx);
EXPECT_EQ(kNoKeyIdx, header->codecHeader.VP8.keyIdx);
EXPECT_TRUE(header->codecHeader.VP8.layerSync);
EXPECT_TRUE(header->codecHeader.VP8.nonReference);
EXPECT_EQ(kPictureId + 1, header->vp8().pictureId);
EXPECT_EQ(kTemporalIdx, header->vp8().temporalIdx);
EXPECT_EQ(kTl0PicIdx, header->vp8().tl0PicIdx);
EXPECT_EQ(kNoKeyIdx, header->vp8().keyIdx);
EXPECT_TRUE(header->vp8().layerSync);
EXPECT_TRUE(header->vp8().nonReference);
return true;
}));
@ -379,15 +379,15 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
EXPECT_EQ(kVideoRotation_90, header->rotation);
EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
EXPECT_EQ(kVideoCodecVP9, header->codec);
EXPECT_EQ(kPictureId + 1, header->codecHeader.VP9.picture_id);
EXPECT_EQ(kTl0PicIdx, header->codecHeader.VP9.tl0_pic_idx);
EXPECT_EQ(header->codecHeader.VP9.temporal_idx,
EXPECT_EQ(kPictureId + 1, header->vp9().picture_id);
EXPECT_EQ(kTl0PicIdx, header->vp9().tl0_pic_idx);
EXPECT_EQ(header->vp9().temporal_idx,
codec_info.codecSpecific.VP9.temporal_idx);
EXPECT_EQ(header->codecHeader.VP9.spatial_idx,
EXPECT_EQ(header->vp9().spatial_idx,
codec_info.codecSpecific.VP9.spatial_idx);
EXPECT_EQ(header->codecHeader.VP9.num_spatial_layers,
EXPECT_EQ(header->vp9().num_spatial_layers,
codec_info.codecSpecific.VP9.num_spatial_layers);
EXPECT_EQ(header->codecHeader.VP9.end_of_picture,
EXPECT_EQ(header->vp9().end_of_picture,
codec_info.codecSpecific.VP9.end_of_picture);
return true;
}));
@ -408,15 +408,15 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
EXPECT_EQ(kVideoRotation_90, header->rotation);
EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
EXPECT_EQ(kVideoCodecVP9, header->codec);
EXPECT_EQ(kPictureId + 1, header->codecHeader.VP9.picture_id);
EXPECT_EQ(kTl0PicIdx, header->codecHeader.VP9.tl0_pic_idx);
EXPECT_EQ(header->codecHeader.VP9.temporal_idx,
EXPECT_EQ(kPictureId + 1, header->vp9().picture_id);
EXPECT_EQ(kTl0PicIdx, header->vp9().tl0_pic_idx);
EXPECT_EQ(header->vp9().temporal_idx,
codec_info.codecSpecific.VP9.temporal_idx);
EXPECT_EQ(header->codecHeader.VP9.spatial_idx,
EXPECT_EQ(header->vp9().spatial_idx,
codec_info.codecSpecific.VP9.spatial_idx);
EXPECT_EQ(header->codecHeader.VP9.num_spatial_layers,
EXPECT_EQ(header->vp9().num_spatial_layers,
codec_info.codecSpecific.VP9.num_spatial_layers);
EXPECT_EQ(header->codecHeader.VP9.end_of_picture,
EXPECT_EQ(header->vp9().end_of_picture,
codec_info.codecSpecific.VP9.end_of_picture);
return true;
}));
@ -446,7 +446,7 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_H264) {
EXPECT_EQ(0, header->simulcastIdx);
EXPECT_EQ(kVideoCodecH264, header->codec);
EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
header->codecHeader.H264.packetization_mode);
header->h264().packetization_mode);
return true;
}));
@ -522,7 +522,7 @@ TEST(PayloadRouterTest, PictureIdIsSetForVp8) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP8, header->codec);
EXPECT_EQ(kInitialPictureId1 + 1, header->codecHeader.VP8.pictureId);
EXPECT_EQ(kInitialPictureId1 + 1, header->vp8().pictureId);
return true;
}));
EXPECT_CALL(rtp1, Sending()).WillOnce(Return(true));
@ -537,7 +537,7 @@ TEST(PayloadRouterTest, PictureIdIsSetForVp8) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP8, header->codec);
EXPECT_EQ(kInitialPictureId2 + 1, header->codecHeader.VP8.pictureId);
EXPECT_EQ(kInitialPictureId2 + 1, header->vp8().pictureId);
return true;
}));
EXPECT_CALL(rtp2, Sending()).WillOnce(Return(true));
@ -574,7 +574,7 @@ TEST(PayloadRouterTest, PictureIdWraps) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP8, header->codec);
EXPECT_EQ(0, header->codecHeader.VP8.pictureId);
EXPECT_EQ(0, header->vp8().pictureId);
return true;
}));
EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
@ -612,8 +612,8 @@ TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp8) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP8, header->codec);
EXPECT_EQ(kInitialPictureId1 + 1, header->codecHeader.VP8.pictureId);
EXPECT_EQ(kInitialTl0PicIdx1, header->codecHeader.VP8.tl0PicIdx);
EXPECT_EQ(kInitialPictureId1 + 1, header->vp8().pictureId);
EXPECT_EQ(kInitialTl0PicIdx1, header->vp8().tl0PicIdx);
return true;
}));
EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
@ -628,8 +628,8 @@ TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp8) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP8, header->codec);
EXPECT_EQ(kInitialPictureId1 + 2, header->codecHeader.VP8.pictureId);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->codecHeader.VP8.tl0PicIdx);
EXPECT_EQ(kInitialPictureId1 + 2, header->vp8().pictureId);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp8().tl0PicIdx);
return true;
}));
EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
@ -668,8 +668,8 @@ TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp9) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP9, header->codec);
EXPECT_EQ(kInitialPictureId1 + 1, header->codecHeader.VP9.picture_id);
EXPECT_EQ(kInitialTl0PicIdx1, header->codecHeader.VP9.tl0_pic_idx);
EXPECT_EQ(kInitialPictureId1 + 1, header->vp9().picture_id);
EXPECT_EQ(kInitialTl0PicIdx1, header->vp9().tl0_pic_idx);
return true;
}));
EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
@ -684,8 +684,8 @@ TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp9) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP9, header->codec);
EXPECT_EQ(kInitialPictureId1 + 2, header->codecHeader.VP9.picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->codecHeader.VP9.tl0_pic_idx);
EXPECT_EQ(kInitialPictureId1 + 2, header->vp9().picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp9().tl0_pic_idx);
return true;
}));
EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
@ -700,8 +700,8 @@ TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp9) {
.WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
Unused, const RTPVideoHeader* header, Unused) {
EXPECT_EQ(kVideoCodecVP9, header->codec);
EXPECT_EQ(kInitialPictureId1 + 2, header->codecHeader.VP9.picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->codecHeader.VP9.tl0_pic_idx);
EXPECT_EQ(kInitialPictureId1 + 2, header->vp9().picture_id);
EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp9().tl0_pic_idx);
return true;
}));
EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));


@ -97,20 +97,14 @@ class PictureIdObserver : public test::RtpRtcpObserver {
switch (codec_type_) {
case kVideoCodecVP8:
parsed->picture_id =
parsed_payload.video_header().codecHeader.VP8.pictureId;
parsed->tl0_pic_idx =
parsed_payload.video_header().codecHeader.VP8.tl0PicIdx;
parsed->temporal_idx =
parsed_payload.video_header().codecHeader.VP8.temporalIdx;
parsed->picture_id = parsed_payload.video_header().vp8().pictureId;
parsed->tl0_pic_idx = parsed_payload.video_header().vp8().tl0PicIdx;
parsed->temporal_idx = parsed_payload.video_header().vp8().temporalIdx;
break;
case kVideoCodecVP9:
parsed->picture_id =
parsed_payload.video_header().codecHeader.VP9.picture_id;
parsed->tl0_pic_idx =
parsed_payload.video_header().codecHeader.VP9.tl0_pic_idx;
parsed->temporal_idx =
parsed_payload.video_header().codecHeader.VP9.temporal_idx;
parsed->picture_id = parsed_payload.video_header().vp9().picture_id;
parsed->tl0_pic_idx = parsed_payload.video_header().vp9().tl0_pic_idx;
parsed->temporal_idx = parsed_payload.video_header().vp9().temporal_idx;
break;
default:
RTC_NOTREACHED();


@ -152,8 +152,8 @@ class RtpVideoStreamReceiverTest : public testing::Test {
data->push_back(H264::NaluType::kSps);
data->push_back(sps_id);
packet->video_header()
.codecHeader.H264
.nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
.h264()
.nalus[packet->video_header().h264().nalus_length++] = info;
}
void AddPps(WebRtcRTPHeader* packet,
@ -167,8 +167,8 @@ class RtpVideoStreamReceiverTest : public testing::Test {
data->push_back(H264::NaluType::kPps);
data->push_back(pps_id);
packet->video_header()
.codecHeader.H264
.nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
.h264()
.nalus[packet->video_header().h264().nalus_length++] = info;
}
void AddIdr(WebRtcRTPHeader* packet, int pps_id) {
@ -177,8 +177,8 @@ class RtpVideoStreamReceiverTest : public testing::Test {
info.sps_id = -1;
info.pps_id = pps_id;
packet->video_header()
.codecHeader.H264
.nalus[packet->video_header().codecHeader.H264.nalus_length++] = info;
.h264()
.nalus[packet->video_header().h264().nalus_length++] = info;
}
protected:


@ -561,11 +561,11 @@ class VideoAnalyzer : public PacketReceiver,
depacketizer->Parse(&parsed_payload, payload, payload_data_length);
RTC_DCHECK(result);
const int temporal_idx = static_cast<int>(
is_vp8 ? parsed_payload.video_header().codecHeader.VP8.temporalIdx
: parsed_payload.video_header().codecHeader.VP9.temporal_idx);
is_vp8 ? parsed_payload.video_header().vp8().temporalIdx
: parsed_payload.video_header().vp9().temporal_idx);
const int spatial_idx = static_cast<int>(
is_vp8 ? kNoSpatialIdx
: parsed_payload.video_header().codecHeader.VP9.spatial_idx);
: parsed_payload.video_header().vp9().spatial_idx);
return (selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
temporal_idx <= selected_tl_) &&
(selected_sl_ < 0 || spatial_idx == kNoSpatialIdx ||
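
The condition is truncated above; spelled out, a packet is kept when no layer is selected, when it carries no layer info, or when its layer index is at or below the selection. As a standalone sketch (the sentinel values are assumptions; WebRTC defines its own kNoTemporalIdx/kNoSpatialIdx constants):

constexpr int kNoTemporalIdxSketch = -1;
constexpr int kNoSpatialIdxSketch = -1;

bool InSelectedLayers(int temporal_idx, int spatial_idx,
                      int selected_tl, int selected_sl) {
  const bool tl_ok = selected_tl < 0 ||
                     temporal_idx == kNoTemporalIdxSketch ||
                     temporal_idx <= selected_tl;
  const bool sl_ok = selected_sl < 0 ||
                     spatial_idx == kNoSpatialIdxSketch ||
                     spatial_idx <= selected_sl;
  return tl_ok && sl_ok;
}
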


@ -3172,17 +3172,17 @@ class Vp9HeaderObserver : public test::SendTest {
EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length));
EXPECT_EQ(VideoCodecType::kVideoCodecVP9, parsed.video_header().codec);
// Verify common fields for all configurations.
VerifyCommonHeader(parsed.video_header().codecHeader.VP9);
VerifyCommonHeader(parsed.video_header().vp9());
CompareConsecutiveFrames(header, parsed.video_header());
// Verify configuration specific settings.
InspectHeader(parsed.video_header().codecHeader.VP9);
InspectHeader(parsed.video_header().vp9());
++packets_sent_;
if (header.markerBit) {
++frames_sent_;
}
last_header_ = header;
last_vp9_ = parsed.video_header().codecHeader.VP9;
last_vp9_ = parsed.video_header().vp9();
}
return SEND_PACKET;
}
@ -3367,7 +3367,7 @@ class Vp9HeaderObserver : public test::SendTest {
void CompareConsecutiveFrames(const RTPHeader& header,
const RTPVideoHeader& video) const {
const RTPVideoHeaderVP9& vp9 = video.codecHeader.VP9;
const RTPVideoHeaderVP9& vp9 = video.vp9();
bool new_frame = packets_sent_ == 0 ||
IsNewerTimestamp(header.timestamp, last_header_.timestamp);