diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn
index d1e3311f0d..d561bf60f9 100644
--- a/modules/video_coding/BUILD.gn
+++ b/modules/video_coding/BUILD.gn
@@ -89,6 +89,7 @@ rtc_static_library("video_coding") {
   }
 
   deps = [
+    ":codec_globals_headers",
     ":video_coding_utility",
     ":webrtc_h264",
     ":webrtc_i420",
diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc
index 648af011f0..72dd70e5e6 100644
--- a/modules/video_coding/frame_object.cc
+++ b/modules/video_coding/frame_object.cc
@@ -10,13 +10,9 @@
 
 #include "modules/video_coding/frame_object.h"
 
-#include <sstream>
-
 #include "common_video/h264/h264_common.h"
 #include "modules/video_coding/packet_buffer.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/logging.h"
-#include "system_wrappers/include/field_trial.h"
 
 namespace webrtc {
 namespace video_coding {
@@ -54,6 +50,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   _payloadType = first_packet->payloadType;
   _timeStamp = first_packet->timestamp;
   ntp_time_ms_ = first_packet->ntp_time_ms_;
+  _frameType = first_packet->frameType;
 
   // Setting frame's playout delays to the same values
   // as of the first packet's.
@@ -73,58 +70,6 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   _buffer = new uint8_t[_size];
   _length = frame_size;
 
-  // For H264 frames we can't determine the frame type by just looking at the
-  // first packet. Instead we consider the frame to be a keyframe if it contains
-  // an IDR, and SPS/PPS if the field trial is set.
-  if (codec_type_ == kVideoCodecH264) {
-    _frameType = kVideoFrameDelta;
-    frame_type_ = kVideoFrameDelta;
-    bool contains_sps = false;
-    bool contains_pps = false;
-    bool contains_idr = false;
-    for (uint16_t seq_num = first_seq_num;
-         seq_num != static_cast<uint16_t>(last_seq_num + 1) &&
-         _frameType == kVideoFrameDelta;
-         ++seq_num) {
-      VCMPacket* packet = packet_buffer_->GetPacket(seq_num);
-      RTC_CHECK(packet);
-      const RTPVideoHeaderH264& header = packet->video_header.codecHeader.H264;
-      for (size_t i = 0; i < header.nalus_length; ++i) {
-        if (header.nalus[i].type == H264::NaluType::kSps) {
-          contains_sps = true;
-        } else if (header.nalus[i].type == H264::NaluType::kPps) {
-          contains_pps = true;
-        } else if (header.nalus[i].type == H264::NaluType::kIdr) {
-          contains_idr = true;
-        }
-      }
-    }
-    const bool sps_pps_idr_is_keyframe =
-        field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe");
-    if ((sps_pps_idr_is_keyframe && contains_idr && contains_sps &&
-         contains_pps) ||
-        (!sps_pps_idr_is_keyframe && contains_idr)) {
-      _frameType = kVideoFrameKey;
-      frame_type_ = kVideoFrameKey;
-    }
-    if (contains_idr && (!contains_sps || !contains_pps)) {
-      std::stringstream ss;
-      ss << "Received H.264-IDR frame "
-         << "(SPS: " << contains_sps << ", PPS: " << contains_pps << "). ";
"; - if (sps_pps_idr_is_keyframe) { - ss << "Treating as delta frame since WebRTC-SpsPpsIdrIsH264Keyframe is " - "enabled."; - } else { - ss << "Treating as key frame since WebRTC-SpsPpsIdrIsH264Keyframe is " - "disabled."; - } - LOG(LS_WARNING) << ss.str(); - } - } else { - _frameType = first_packet->frameType; - frame_type_ = first_packet->frameType; - } - bool bitstream_copied = GetBitstream(_buffer); RTC_DCHECK(bitstream_copied); _encodedWidth = first_packet->width; diff --git a/modules/video_coding/h264_sps_pps_tracker.cc b/modules/video_coding/h264_sps_pps_tracker.cc index 2346120116..443946d853 100644 --- a/modules/video_coding/h264_sps_pps_tracker.cc +++ b/modules/video_coding/h264_sps_pps_tracker.cc @@ -16,6 +16,7 @@ #include "common_video/h264/h264_common.h" #include "common_video/h264/pps_parser.h" #include "common_video/h264/sps_parser.h" +#include "modules/video_coding/codecs/h264/include/h264_globals.h" #include "modules/video_coding/frame_object.h" #include "modules/video_coding/packet_buffer.h" #include "rtc_base/checks.h" @@ -35,14 +36,14 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream( const uint8_t* data = packet->dataPtr; const size_t data_size = packet->sizeBytes; const RTPVideoHeader& video_header = packet->video_header; - const RTPVideoHeaderH264& codec_header = video_header.codecHeader.H264; + RTPVideoHeaderH264* codec_header = &packet->video_header.codecHeader.H264; bool append_sps_pps = false; auto sps = sps_data_.end(); auto pps = pps_data_.end(); - for (size_t i = 0; i < codec_header.nalus_length; ++i) { - const NaluInfo& nalu = codec_header.nalus[i]; + for (size_t i = 0; i < codec_header->nalus_length; ++i) { + const NaluInfo& nalu = codec_header->nalus[i]; switch (nalu.type) { case H264::NaluType::kSps: { sps_data_[nalu.sps_id].width = packet->width; @@ -109,7 +110,7 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream( required_size += pps->second.size + sizeof(start_code_h264); } - if (codec_header.packetization_type == kH264StapA) { + if (codec_header->packetization_type == kH264StapA) { const uint8_t* nalu_ptr = data + 1; while (nalu_ptr < data + data_size) { RTC_DCHECK(video_header.is_first_packet_in_frame); @@ -144,10 +145,27 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream( insert_at += sizeof(start_code_h264); memcpy(insert_at, pps->second.data.get(), pps->second.size); insert_at += pps->second.size; + + // Update codec header to reflect the newly added SPS and PPS. + NaluInfo sps_info; + sps_info.type = H264::NaluType::kSps; + sps_info.sps_id = sps->first; + sps_info.pps_id = -1; + NaluInfo pps_info; + pps_info.type = H264::NaluType::kPps; + pps_info.sps_id = sps->first; + pps_info.pps_id = pps->first; + if (codec_header->nalus_length + 2 <= kMaxNalusPerPacket) { + codec_header->nalus[codec_header->nalus_length++] = sps_info; + codec_header->nalus[codec_header->nalus_length++] = pps_info; + } else { + LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert " + "SPS/PPS provided out-of-band."; + } } // Copy the rest of the bitstream and insert start codes. 
-  if (codec_header.packetization_type == kH264StapA) {
+  if (codec_header->packetization_type == kH264StapA) {
     const uint8_t* nalu_ptr = data + 1;
     while (nalu_ptr < data + data_size) {
       memcpy(insert_at, start_code_h264, sizeof(start_code_h264));
diff --git a/modules/video_coding/h264_sps_pps_tracker_unittest.cc b/modules/video_coding/h264_sps_pps_tracker_unittest.cc
index 918686911b..f88992f28b 100644
--- a/modules/video_coding/h264_sps_pps_tracker_unittest.cc
+++ b/modules/video_coding/h264_sps_pps_tracker_unittest.cc
@@ -21,6 +21,31 @@ namespace video_coding {
 namespace {
 
 const uint8_t start_code[] = {0, 0, 0, 1};
+
+void ExpectSpsPpsIdr(const RTPVideoHeaderH264& codec_header,
+                     uint8_t sps_id,
+                     uint8_t pps_id) {
+  bool contains_sps = false;
+  bool contains_pps = false;
+  bool contains_idr = false;
+  for (const auto& nalu : codec_header.nalus) {
+    if (nalu.type == H264::NaluType::kSps) {
+      EXPECT_EQ(sps_id, nalu.sps_id);
+      contains_sps = true;
+    } else if (nalu.type == H264::NaluType::kPps) {
+      EXPECT_EQ(sps_id, nalu.sps_id);
+      EXPECT_EQ(pps_id, nalu.pps_id);
+      contains_pps = true;
+    } else if (nalu.type == H264::NaluType::kIdr) {
+      EXPECT_EQ(pps_id, nalu.pps_id);
+      contains_idr = true;
+    }
+  }
+  EXPECT_TRUE(contains_sps);
+  EXPECT_TRUE(contains_pps);
+  EXPECT_TRUE(contains_idr);
+}
+
 }  // namespace
 
 class TestH264SpsPpsTracker : public ::testing::Test {
@@ -264,10 +289,14 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBand) {
   AddIdr(&idr_packet, 0);
   idr_packet.dataPtr = kData;
   idr_packet.sizeBytes = sizeof(kData);
+  EXPECT_EQ(1u, idr_packet.video_header.codecHeader.H264.nalus_length);
   EXPECT_EQ(H264SpsPpsTracker::kInsert,
             tracker_.CopyAndFixBitstream(&idr_packet));
+  EXPECT_EQ(3u, idr_packet.video_header.codecHeader.H264.nalus_length);
   EXPECT_EQ(320, idr_packet.width);
   EXPECT_EQ(240, idr_packet.height);
+  ExpectSpsPpsIdr(idr_packet.video_header.codecHeader.H264, 0, 0);
+
   if (idr_packet.dataPtr != kData) {
     // In case CopyAndFixBitStream() prepends SPS/PPS nalus to the packet, it
     // uses new uint8_t[] to allocate memory. Caller of CopyAndFixBitStream()
diff --git a/modules/video_coding/packet_buffer.cc b/modules/video_coding/packet_buffer.cc
index 7b9b659543..0e4ef6ad6e 100644
--- a/modules/video_coding/packet_buffer.cc
+++ b/modules/video_coding/packet_buffer.cc
@@ -12,6 +12,7 @@
 
 #include <algorithm>
 #include <limits>
+#include <sstream>
 #include <utility>
 
 #include "common_video/h264/h264_common.h"
@@ -20,6 +21,7 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/field_trial.h"
 
 namespace webrtc {
 namespace video_coding {
@@ -45,7 +47,9 @@ PacketBuffer::PacketBuffer(Clock* clock,
       is_cleared_to_first_seq_num_(false),
       data_buffer_(start_buffer_size),
       sequence_buffer_(start_buffer_size),
-      received_frame_callback_(received_frame_callback) {
+      received_frame_callback_(received_frame_callback),
+      sps_pps_idr_is_h264_keyframe_(
+          field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) {
   RTC_DCHECK_LE(start_buffer_size, max_buffer_size);
   // Buffer size must always be a power of 2.
   RTC_DCHECK((start_buffer_size & (start_buffer_size - 1)) == 0);
@@ -269,11 +273,15 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
       // the |frame_begin| flag is set.
       int start_index = index;
       size_t tested_packets = 0;
-
-      bool is_h264 = data_buffer_[start_index].codec == kVideoCodecH264;
-      bool is_h264_keyframe = false;
       int64_t frame_timestamp = data_buffer_[start_index].timestamp;
 
+      // Identify H.264 keyframes by means of SPS, PPS, and IDR.
+      bool is_h264 = data_buffer_[start_index].codec == kVideoCodecH264;
+      bool has_h264_sps = false;
+      bool has_h264_pps = false;
+      bool has_h264_idr = false;
+      bool is_h264_keyframe = false;
+
       while (true) {
         ++tested_packets;
         frame_size += data_buffer_[start_index].sizeBytes;
@@ -287,12 +295,20 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
         if (is_h264 && !is_h264_keyframe) {
           const RTPVideoHeaderH264& header =
               data_buffer_[start_index].video_header.codecHeader.H264;
-          for (size_t i = 0; i < header.nalus_length; ++i) {
-            if (header.nalus[i].type == H264::NaluType::kIdr) {
-              is_h264_keyframe = true;
-              break;
+          for (size_t j = 0; j < header.nalus_length; ++j) {
+            if (header.nalus[j].type == H264::NaluType::kSps) {
+              has_h264_sps = true;
+            } else if (header.nalus[j].type == H264::NaluType::kPps) {
+              has_h264_pps = true;
+            } else if (header.nalus[j].type == H264::NaluType::kIdr) {
+              has_h264_idr = true;
             }
           }
+          if ((sps_pps_idr_is_h264_keyframe_ && has_h264_idr && has_h264_sps &&
+               has_h264_pps) ||
+              (!sps_pps_idr_is_h264_keyframe_ && has_h264_idr)) {
+            is_h264_keyframe = true;
+          }
         }
 
         if (tested_packets == size_)
@@ -315,18 +331,45 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
         --start_seq_num;
       }
 
-      // If this is H264 but not a keyframe, make sure there are no gaps in the
-      // packet sequence numbers up until this point.
-      if (is_h264 && !is_h264_keyframe &&
-          missing_packets_.upper_bound(start_seq_num) !=
-              missing_packets_.begin()) {
-        uint16_t stop_index = (index + 1) % size_;
-        while (start_index != stop_index) {
-          sequence_buffer_[start_index].frame_created = false;
-          start_index = (start_index + 1) % size_;
+      if (is_h264) {
+        // Warn if this is an unsafe frame.
+        if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
+          std::stringstream ss;
+          ss << "Received H.264-IDR frame "
+             << "(SPS: " << has_h264_sps << ", PPS: " << has_h264_pps << "). ";
+          if (sps_pps_idr_is_h264_keyframe_) {
+            ss << "Treating as delta frame since "
+                  "WebRTC-SpsPpsIdrIsH264Keyframe is enabled.";
+          } else {
+            ss << "Treating as key frame since "
+                  "WebRTC-SpsPpsIdrIsH264Keyframe is disabled.";
+          }
+          LOG(LS_WARNING) << ss.str();
         }
 
-        return found_frames;
+        // Now that we have decided whether to treat this frame as a key frame
+        // or delta frame in the frame buffer, we update the field that
+        // determines if the RtpFrameObject is a key frame or delta frame.
+        const size_t first_packet_index = start_seq_num % size_;
+        RTC_CHECK_LT(first_packet_index, size_);
+        if (is_h264_keyframe) {
+          data_buffer_[first_packet_index].frameType = kVideoFrameKey;
+        } else {
+          data_buffer_[first_packet_index].frameType = kVideoFrameDelta;
+        }
+
+        // If this is not a keyframe, make sure there are no gaps in the
+        // packet sequence numbers up until this point.
+        if (!is_h264_keyframe && missing_packets_.upper_bound(start_seq_num) !=
+                                     missing_packets_.begin()) {
+          uint16_t stop_index = (index + 1) % size_;
+          while (start_index != stop_index) {
+            sequence_buffer_[start_index].frame_created = false;
+            start_index = (start_index + 1) % size_;
+          }
+
+          return found_frames;
+        }
       }
 
       missing_packets_.erase(missing_packets_.begin(),
diff --git a/modules/video_coding/packet_buffer.h b/modules/video_coding/packet_buffer.h
index ca499bfde9..c1ca7b8d39 100644
--- a/modules/video_coding/packet_buffer.h
+++ b/modules/video_coding/packet_buffer.h
@@ -160,6 +160,10 @@ class PacketBuffer {
   std::set<uint16_t, DescendingSeqNumComp<uint16_t>> missing_packets_
       RTC_GUARDED_BY(crit_);
 
+  // Indicates if we should require SPS, PPS, and IDR for a particular
+  // RTP timestamp to treat the corresponding frame as a keyframe.
+  const bool sps_pps_idr_is_h264_keyframe_;
+
   mutable volatile int ref_count_ = 0;
 };
diff --git a/modules/video_coding/video_packet_buffer_unittest.cc b/modules/video_coding/video_packet_buffer_unittest.cc
index 4c6f0b3f52..f8b3dc65dc 100644
--- a/modules/video_coding/video_packet_buffer_unittest.cc
+++ b/modules/video_coding/video_packet_buffer_unittest.cc
@@ -27,8 +27,10 @@ namespace video_coding {
 class TestPacketBuffer : public ::testing::Test,
                          public OnReceivedFrameCallback {
  protected:
-  TestPacketBuffer()
-      : rand_(0x7732213),
+  TestPacketBuffer() : TestPacketBuffer("") {}
+  explicit TestPacketBuffer(std::string field_trials)
+      : scoped_field_trials_(field_trials),
+        rand_(0x7732213),
         clock_(new SimulatedClock(0)),
         packet_buffer_(
             PacketBuffer::Create(clock_.get(), kStartSize, kMaxSize, this)) {}
@@ -81,6 +83,8 @@ class TestPacketBuffer : public ::testing::Test,
   static constexpr int kStartSize = 16;
   static constexpr int kMaxSize = 64;
 
+  const test::ScopedFieldTrials scoped_field_trials_;
+
   Random rand_;
   std::unique_ptr<SimulatedClock> clock_;
   rtc::scoped_refptr<PacketBuffer> packet_buffer_;
@@ -423,15 +427,17 @@ TEST_F(TestPacketBuffer, GetBitstreamOneFrameFullBuffer) {
   EXPECT_EQ(memcmp(result, expected, kStartSize), 0);
 }
 
-class TestPacketBufferH264 : public TestPacketBuffer,
-                             public ::testing::WithParamInterface<bool> {
+// If |sps_pps_idr_is_keyframe| is true, we require keyframes to contain
+// SPS/PPS/IDR and the keyframes we create as part of the test do contain
+// SPS/PPS/IDR. If |sps_pps_idr_is_keyframe| is false, we only require and
+// create keyframes containing only IDR.
+class TestPacketBufferH264 : public TestPacketBuffer {
  protected:
-  TestPacketBufferH264() : TestPacketBufferH264(GetParam()) {}
   explicit TestPacketBufferH264(bool sps_pps_idr_is_keyframe)
-      : sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe),
-        scoped_field_trials_(sps_pps_idr_is_keyframe_
-                                 ? "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"
-                                 : "") {}
+      : TestPacketBuffer(sps_pps_idr_is_keyframe
+                             ? "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"
+                             : ""),
+        sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe) {}
 
   bool InsertH264(uint16_t seq_num,      // packet sequence number
                   IsKeyFrame keyframe,   // is keyframe
@@ -468,14 +474,22 @@ class TestPacketBufferH264 : public TestPacketBuffer,
   }
 
   const bool sps_pps_idr_is_keyframe_;
-  const test::ScopedFieldTrials scoped_field_trials_;
+};
+
+// This fixture is used to test the general behaviour of the packet buffer
+// in both configurations.
+class TestPacketBufferH264Parameterized
+    : public ::testing::WithParamInterface<bool>,
+      public TestPacketBufferH264 {
+ protected:
+  TestPacketBufferH264Parameterized() : TestPacketBufferH264(GetParam()) {}
 };
 
 INSTANTIATE_TEST_CASE_P(SpsPpsIdrIsKeyframe,
-                        TestPacketBufferH264,
+                        TestPacketBufferH264Parameterized,
                         ::testing::Values(false, true));
 
-TEST_P(TestPacketBufferH264, GetBitstreamOneFrameFullBuffer) {
+TEST_P(TestPacketBufferH264Parameterized, GetBitstreamOneFrameFullBuffer) {
   uint8_t* data_arr[kStartSize];
   uint8_t expected[kStartSize];
   uint8_t result[kStartSize];
@@ -501,7 +515,7 @@ TEST_P(TestPacketBufferH264, GetBitstreamOneFrameFullBuffer) {
   EXPECT_EQ(memcmp(result, expected, kStartSize), 0);
 }
 
-TEST_P(TestPacketBufferH264, GetBitstreamBufferPadding) {
+TEST_P(TestPacketBufferH264Parameterized, GetBitstreamBufferPadding) {
   uint16_t seq_num = Rand();
   uint8_t data_data[] = "some plain old data";
   uint8_t* data = new uint8_t[sizeof(data_data)];
@@ -661,7 +675,7 @@ TEST_F(TestPacketBuffer, PacketTimestamps) {
   EXPECT_FALSE(packet_keyframe_ms);
 }
 
-TEST_P(TestPacketBufferH264, OneFrameFillBuffer) {
+TEST_P(TestPacketBufferH264Parameterized, OneFrameFillBuffer) {
   InsertH264(0, kKeyFrame, kFirst, kNotLast, 1000);
   for (int i = 1; i < kStartSize - 1; ++i)
     InsertH264(i, kKeyFrame, kNotFirst, kNotLast, 1000);
@@ -671,7 +685,7 @@ TEST_P(TestPacketBufferH264, OneFrameFillBuffer) {
   CheckFrame(0);
 }
 
-TEST_P(TestPacketBufferH264, CreateFramesAfterFilledBuffer) {
+TEST_P(TestPacketBufferH264Parameterized, CreateFramesAfterFilledBuffer) {
   InsertH264(kStartSize - 2, kKeyFrame, kFirst, kLast, 0);
   ASSERT_EQ(1UL, frames_from_callback_.size());
   frames_from_callback_.clear();
@@ -688,7 +702,7 @@ TEST_P(TestPacketBufferH264, CreateFramesAfterFilledBuffer) {
   CheckFrame(kStartSize);
 }
 
-TEST_P(TestPacketBufferH264, OneFrameMaxSeqNum) {
+TEST_P(TestPacketBufferH264Parameterized, OneFrameMaxSeqNum) {
   InsertH264(65534, kKeyFrame, kFirst, kNotLast, 1000);
   InsertH264(65535, kKeyFrame, kNotFirst, kLast, 1000);
 
@@ -696,7 +710,7 @@ TEST_P(TestPacketBufferH264, OneFrameMaxSeqNum) {
   CheckFrame(65534);
 }
 
-TEST_P(TestPacketBufferH264, ClearMissingPacketsOnKeyframe) {
+TEST_P(TestPacketBufferH264Parameterized, ClearMissingPacketsOnKeyframe) {
   InsertH264(0, kKeyFrame, kFirst, kLast, 1000);
   InsertH264(2, kKeyFrame, kFirst, kLast, 3000);
   InsertH264(3, kDeltaFrame, kFirst, kNotLast, 4000);
@@ -713,7 +727,7 @@ TEST_P(TestPacketBufferH264, ClearMissingPacketsOnKeyframe) {
   CheckFrame(kStartSize + 1);
 }
 
-TEST_P(TestPacketBufferH264, FindFramesOnPadding) {
+TEST_P(TestPacketBufferH264Parameterized, FindFramesOnPadding) {
   InsertH264(0, kKeyFrame, kFirst, kLast, 1000);
   InsertH264(2, kDeltaFrame, kFirst, kLast, 1000);
 
diff --git a/video/full_stack_tests.cc b/video/full_stack_tests.cc
index ea1e37f9f0..c3dfb798e6 100644
--- a/video/full_stack_tests.cc
+++ b/video/full_stack_tests.cc
@@ -190,7 +190,6 @@ TEST_F(FullStackTest, ForemanCifPlr5H264) {
   foreman_cif.call.send_side_bwe = true;
   foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
                        "H264", 1, 0, 0, false, false, "foreman_cif"};
-  std::string fec_description;
   foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264", 0.0, 0.0,
                           kFullStackTestDurationSecs};
   foreman_cif.pipe.loss_percent = 5;
@@ -198,6 +197,21 @@ TEST_F(FullStackTest, ForemanCifPlr5H264) {
   RunTest(foreman_cif);
 }
 
+TEST_F(FullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/");
+
+  VideoQualityTest::Params foreman_cif;
+  foreman_cif.call.send_side_bwe = true;
+  foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+                       "H264", 1, 0, 0, false, false, "foreman_cif"};
+  foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_sps_pps_idr", 0.0,
+                          0.0, kFullStackTestDurationSecs};
+  foreman_cif.pipe.loss_percent = 5;
+  foreman_cif.pipe.queue_delay_ms = 50;
+  RunTest(foreman_cif);
+}
+
 // Verify that this is worth the bot time, before enabling.
 TEST_F(FullStackTest, ForemanCifPlr5H264Flexfec) {
   VideoQualityTest::Params foreman_cif;
diff --git a/video/rtp_video_stream_receiver_unittest.cc b/video/rtp_video_stream_receiver_unittest.cc
index a3d9da6c25..297218205f 100644
--- a/video/rtp_video_stream_receiver_unittest.cc
+++ b/video/rtp_video_stream_receiver_unittest.cc
@@ -35,8 +35,6 @@ namespace webrtc {
 
 namespace {
 
-const char kNewJitterBufferFieldTrialEnabled[] =
-    "WebRTC-NewVideoJitterBuffer/Enabled/";
 const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};
 
 class MockTransport : public Transport {
@@ -120,8 +118,10 @@ MATCHER_P(SamePacketAs, other, "") {
 
 class RtpVideoStreamReceiverTest : public testing::Test {
 public:
-  RtpVideoStreamReceiverTest()
-      : config_(CreateConfig()),
+  RtpVideoStreamReceiverTest() : RtpVideoStreamReceiverTest("") {}
+  explicit RtpVideoStreamReceiverTest(std::string field_trials)
+      : override_field_trials_(field_trials),
+        config_(CreateConfig()),
         timing_(Clock::GetRealTimeClock()),
         process_thread_(ProcessThread::Create("TestThread")) {}
 
@@ -189,8 +189,7 @@ class RtpVideoStreamReceiverTest : public testing::Test {
     return config;
   }
 
-  webrtc::test::ScopedFieldTrials override_field_trials_{
-      kNewJitterBufferFieldTrialEnabled};
+  const webrtc::test::ScopedFieldTrials override_field_trials_;
   VideoReceiveStream::Config config_;
   MockNackSender mock_nack_sender_;
   MockKeyFrameRequestSender mock_key_frame_request_sender_;
@@ -237,7 +236,19 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
                                     &rtp_header);
 }
 
-TEST_F(RtpVideoStreamReceiverTest, InBandSpsPps) {
+class RtpVideoStreamReceiverTestH264
+    : public RtpVideoStreamReceiverTest,
+      public testing::WithParamInterface<std::string> {
+ protected:
+  RtpVideoStreamReceiverTestH264() : RtpVideoStreamReceiverTest(GetParam()) {}
+};
+
+INSTANTIATE_TEST_CASE_P(
+    SpsPpsIdrIsKeyframe,
+    RtpVideoStreamReceiverTestH264,
+    ::testing::Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+
+TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
   std::vector<uint8_t> sps_data;
   WebRtcRTPHeader sps_packet = GetDefaultPacket();
   AddSps(&sps_packet, 0, &sps_data);
@@ -279,7 +290,7 @@ TEST_F(RtpVideoStreamReceiverTest, InBandSpsPps) {
                                idr_data.data(), idr_data.size(), &idr_packet);
 }
 
-TEST_F(RtpVideoStreamReceiverTest, OutOfBandFmtpSpsPps) {
+TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
   constexpr int kPayloadType = 99;
   VideoCodec codec;
   codec.plType = kPayloadType;