Move H.264 SPS VUI rewriting to FrameEncodeMetadataWriter.

Bug: webrtc:10559
Change-Id: I956287e71a47856cfb6dd807d9715d6ee2572f55
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/138263
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Sergey Silkin <ssilkin@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Mirta Dvornicic <mirtad@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28100}
This commit was authored by Mirta Dvornicic on 2019-05-28 16:30:16 +02:00 and committed by Commit Bot.
parent a1d1a1e976
commit 28f0eb2dde
12 changed files with 248 additions and 104 deletions

View File

@ -93,6 +93,12 @@ class RTC_EXPORT EncodedImage {
buffer_ = nullptr;
}
// Adopts |encoded_data| as this image's payload via copy-on-write sharing.
// Clearing the legacy raw pointer |buffer_| makes data() fall through to
// |encoded_data_| (see the accessors below this method).
void SetEncodedData(const rtc::CopyOnWriteBuffer& encoded_data) {
encoded_data_ = encoded_data;
size_ = encoded_data.size();
buffer_ = nullptr;
}
uint8_t* data() { return buffer_ ? buffer_ : encoded_data_.data(); }
const uint8_t* data() const {
return buffer_ ? buffer_ : encoded_data_.cdata();

View File

@ -202,7 +202,7 @@ void SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps(
size_t num_nalus,
const size_t* nalu_offsets,
const size_t* nalu_lengths,
rtc::Buffer* output_buffer,
rtc::CopyOnWriteBuffer* output_buffer,
size_t* output_nalu_offsets,
size_t* output_nalu_lengths) {
// Allocate some extra space for potentially adding a missing VUI.

View File

@ -18,6 +18,7 @@
#include "absl/types/optional.h"
#include "common_video/h264/sps_parser.h"
#include "rtc_base/buffer.h"
#include "rtc_base/copy_on_write_buffer.h"
namespace webrtc {
@ -60,7 +61,7 @@ class SpsVuiRewriter : private SpsParser {
size_t num_nalus,
const size_t* nalu_offsets,
const size_t* nalu_lengths,
rtc::Buffer* output_buffer,
rtc::CopyOnWriteBuffer* output_buffer,
size_t* output_nalu_offsets,
size_t* output_nalu_lengths);

View File

@ -209,7 +209,7 @@ TEST(SpsVuiRewriterTest, ParseOutgoingBitstreamOptimalVui) {
nalu_lengths[1] = sizeof(kIdr1);
buffer.AppendData(kIdr1);
rtc::Buffer modified_buffer;
rtc::CopyOnWriteBuffer modified_buffer;
size_t modified_nalu_offsets[kNumNalus];
size_t modified_nalu_lengths[kNumNalus];
@ -273,7 +273,7 @@ TEST(SpsVuiRewriterTest, ParseOutgoingBitstreamNoVui) {
expected_nalu_lengths[2] = sizeof(kIdr2);
expected_buffer.AppendData(kIdr2);
rtc::Buffer modified_buffer;
rtc::CopyOnWriteBuffer modified_buffer;
size_t modified_nalu_offsets[kNumNalus];
size_t modified_nalu_lengths[kNumNalus];

View File

@ -76,26 +76,15 @@ RtpPacketizerH264::RtpPacketizerH264(
H264PacketizationMode packetization_mode,
const RTPFragmentationHeader& fragmentation)
: limits_(limits),
modified_buffer_(new rtc::Buffer()),
num_packets_left_(0) {
// Guard against uninitialized memory in packetization_mode.
RTC_CHECK(packetization_mode == H264PacketizationMode::NonInterleaved ||
packetization_mode == H264PacketizationMode::SingleNalUnit);
RTPFragmentationHeader modified_fragmentation;
modified_fragmentation.CopyFrom(fragmentation);
SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps(
payload, fragmentation.fragmentationVectorSize,
fragmentation.fragmentationOffset, fragmentation.fragmentationLength,
modified_buffer_.get(), modified_fragmentation.fragmentationOffset,
modified_fragmentation.fragmentationLength);
for (size_t i = 0; i < modified_fragmentation.fragmentationVectorSize; ++i) {
const uint8_t* fragment = modified_buffer_->data() +
modified_fragmentation.fragmentationOffset[i];
const size_t fragment_length =
modified_fragmentation.fragmentationLength[i];
for (size_t i = 0; i < fragmentation.fragmentationVectorSize; ++i) {
const uint8_t* fragment =
payload.data() + fragmentation.fragmentationOffset[i];
const size_t fragment_length = fragmentation.fragmentationLength[i];
input_fragments_.push_back(Fragment(fragment, fragment_length));
}

View File

@ -90,7 +90,6 @@ class RtpPacketizerH264 : public RtpPacketizer {
void NextFragmentPacket(RtpPacketToSend* rtp_packet);
const PayloadSizeLimits limits_;
std::unique_ptr<rtc::Buffer> modified_buffer_;
size_t num_packets_left_;
std::deque<Fragment> input_fragments_;
std::queue<PacketUnit> packets_;

View File

@ -497,7 +497,6 @@ TEST(RtpPacketizerH264Test, RejectsOverlongDataInPacketizationMode0) {
EXPECT_THAT(packets, IsEmpty());
}
const uint8_t kStartSequence[] = {0x00, 0x00, 0x00, 0x01};
const uint8_t kOriginalSps[] = {kSps, 0x00, 0x00, 0x03, 0x03,
0xF4, 0x05, 0x03, 0xC7, 0xC0};
const uint8_t kRewrittenSps[] = {kSps, 0x00, 0x00, 0x03, 0x03, 0xF4, 0x05, 0x03,
@ -505,79 +504,6 @@ const uint8_t kRewrittenSps[] = {kSps, 0x00, 0x00, 0x03, 0x03, 0xF4, 0x05, 0x03,
const uint8_t kIdrOne[] = {kIdr, 0xFF, 0x00, 0x00, 0x04};
const uint8_t kIdrTwo[] = {kIdr, 0xFF, 0x00, 0x11};
// Fixture that builds an Annex B H.264 access unit (start code, an SPS that
// needs VUI rewriting, and two IDR slices) together with a fragmentation
// header whose three entries point at the SPS and the two IDR NAL units.
class RtpPacketizerH264TestSpsRewriting : public ::testing::Test {
public:
void SetUp() override {
fragmentation_header_.VerifyAndAllocateFragmentationHeader(3);
fragmentation_header_.fragmentationVectorSize = 3;
// The leading start sequence is not covered by any fragment; each
// fragment's offset is the buffer size just before its NALU is appended.
in_buffer_.AppendData(kStartSequence);
fragmentation_header_.fragmentationOffset[0] = in_buffer_.size();
fragmentation_header_.fragmentationLength[0] = sizeof(kOriginalSps);
in_buffer_.AppendData(kOriginalSps);
fragmentation_header_.fragmentationOffset[1] = in_buffer_.size();
fragmentation_header_.fragmentationLength[1] = sizeof(kIdrOne);
in_buffer_.AppendData(kIdrOne);
fragmentation_header_.fragmentationOffset[2] = in_buffer_.size();
fragmentation_header_.fragmentationLength[2] = sizeof(kIdrTwo);
in_buffer_.AppendData(kIdrTwo);
}
protected:
rtc::Buffer in_buffer_;            // Full access unit as fed to the packetizer.
RTPFragmentationHeader fragmentation_header_;  // NALU offsets/lengths.
};
// The packetizer must emit the rewritten SPS (kRewrittenSps), split across
// two FU-A packets, when the payload limit forces fragmentation of the SPS.
TEST_F(RtpPacketizerH264TestSpsRewriting, FuASps) {
const size_t kHeaderOverhead = kFuAHeaderSize + 1;
// Set size to fragment SPS into two FU-A packets.
RtpPacketizer::PayloadSizeLimits limits;
limits.max_payload_len = sizeof(kOriginalSps) - 2 + kHeaderOverhead;
RtpPacketizerH264 packetizer(in_buffer_, limits,
H264PacketizationMode::NonInterleaved,
fragmentation_header_);
std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
// Walk both FU-A payloads and check they reassemble to kRewrittenSps,
// skipping the one-byte NALU type that FU-A strips from the first fragment.
size_t offset = H264::kNaluTypeSize;
size_t length = packets[0].payload_size() - kFuAHeaderSize;
EXPECT_THAT(packets[0].payload().subview(kFuAHeaderSize),
ElementsAreArray(&kRewrittenSps[offset], length));
offset += length;
length = packets[1].payload_size() - kFuAHeaderSize;
EXPECT_THAT(packets[1].payload().subview(kFuAHeaderSize),
ElementsAreArray(&kRewrittenSps[offset], length));
offset += length;
// Both fragments together must cover the rewritten SPS exactly.
EXPECT_EQ(offset, sizeof(kRewrittenSps));
}
// When everything fits in one STAP-A packet, the aggregated payload must
// contain the rewritten SPS (not the original) at its expected offset.
TEST_F(RtpPacketizerH264TestSpsRewriting, StapASps) {
const size_t kHeaderOverhead = kFuAHeaderSize + 1;
// STAP-A layout: one NALU-type byte, then (length field + NALU) per unit.
const size_t kExpectedTotalSize = H264::kNaluTypeSize + // Stap-A type.
sizeof(kRewrittenSps) + sizeof(kIdrOne) +
sizeof(kIdrTwo) + (kLengthFieldLength * 3);
// Set size to include SPS and the rest of the packets in a Stap-A package.
RtpPacketizer::PayloadSizeLimits limits;
limits.max_payload_len = kExpectedTotalSize + kHeaderOverhead;
RtpPacketizerH264 packetizer(in_buffer_, limits,
H264PacketizationMode::NonInterleaved,
fragmentation_header_);
std::vector<RtpPacketToSend> packets = FetchAllPackets(&packetizer);
ASSERT_THAT(packets, SizeIs(1));
EXPECT_EQ(packets[0].payload_size(), kExpectedTotalSize);
// The SPS is the first aggregated NALU, right after type byte + length.
EXPECT_THAT(
packets[0].payload().subview(H264::kNaluTypeSize + kLengthFieldLength,
sizeof(kRewrittenSps)),
ElementsAreArray(kRewrittenSps));
}
struct H264ParsedPayload : public RtpDepacketizer::ParsedPayload {
RTPVideoHeaderH264& h264() {
return absl::get<RTPVideoHeaderH264>(video.video_type_header);

View File

@ -12,8 +12,11 @@
#include <algorithm>
#include "absl/memory/memory.h"
#include "common_video/h264/sps_vui_rewriter.h"
#include "modules/include/module_common_types_public.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
@ -183,6 +186,35 @@ void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx,
}
}
// Rewrites the SPS VUI of an H.264 key-frame bitstream in place and returns
// a fragmentation header updated for the (possibly resized) NAL units.
// Returns nullptr — leaving |encoded_image| untouched — when no rewrite
// applies: missing codec info, non-H.264 codec, missing fragmentation, or a
// non-key frame (an SPS is only expected on key frames).
std::unique_ptr<RTPFragmentationHeader>
FrameEncodeMetadataWriter::UpdateBitstream(
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation,
EncodedImage* encoded_image) {
if (!codec_specific_info ||
codec_specific_info->codecType != kVideoCodecH264 || !fragmentation ||
encoded_image->_frameType != VideoFrameType::kVideoFrameKey) {
return nullptr;
}
rtc::CopyOnWriteBuffer modified_buffer;
// Start from a copy of the caller's fragmentation; the rewriter updates the
// offsets/lengths to match the rewritten bitstream.
std::unique_ptr<RTPFragmentationHeader> modified_fragmentation =
absl::make_unique<RTPFragmentationHeader>();
modified_fragmentation->CopyFrom(*fragmentation);
// Make sure that the data is not copied if owned by EncodedImage.
// (Binding a const reference selects the const data() accessor.)
const EncodedImage& buffer = *encoded_image;
SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps(
buffer, fragmentation->fragmentationVectorSize,
fragmentation->fragmentationOffset, fragmentation->fragmentationLength,
&modified_buffer, modified_fragmentation->fragmentationOffset,
modified_fragmentation->fragmentationLength);
// Replace the image payload with the rewritten buffer (copy-on-write).
encoded_image->SetEncodedData(modified_buffer);
return modified_fragmentation;
}
void FrameEncodeMetadataWriter::Reset() {
rtc::CritScope cs(&lock_);
timing_frames_info_.clear();

View File

@ -12,12 +12,14 @@
#define VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
#include <list>
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/video/encoded_image.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/critical_section.h"
namespace webrtc {
@ -34,6 +36,12 @@ class FrameEncodeMetadataWriter {
void OnEncodeStarted(const VideoFrame& frame);
void FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image);
std::unique_ptr<RTPFragmentationHeader> UpdateBitstream(
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation,
EncodedImage* encoded_image);
void Reset();
private:

View File

@ -16,9 +16,11 @@
#include "api/video/i420_buffer.h"
#include "api/video/video_frame.h"
#include "api/video/video_timing.h"
#include "common_video/h264/h264_common.h"
#include "common_video/test/utilities.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/time_utils.h"
#include "test/gmock.h"
#include "test/gtest.h"
namespace webrtc {
@ -125,7 +127,7 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
}
} // namespace
TEST(FrameEncodeTimerTest, MarksTimingFramesPeriodicallyTogether) {
TEST(FrameEncodeMetadataWriterTest, MarksTimingFramesPeriodicallyTogether) {
const int64_t kDelayMs = 29;
const size_t kMinFrameSize = 10;
const size_t kMaxFrameSize = 20;
@ -169,7 +171,7 @@ TEST(FrameEncodeTimerTest, MarksTimingFramesPeriodicallyTogether) {
}
}
TEST(FrameEncodeTimerTest, MarksOutliers) {
TEST(FrameEncodeMetadataWriterTest, MarksOutliers) {
const int64_t kDelayMs = 29;
const size_t kMinFrameSize = 2495;
const size_t kMaxFrameSize = 2505;
@ -191,7 +193,7 @@ TEST(FrameEncodeTimerTest, MarksOutliers) {
}
}
TEST(FrameEncodeTimerTest, NoTimingFrameIfNoEncodeStartTime) {
TEST(FrameEncodeMetadataWriterTest, NoTimingFrameIfNoEncodeStartTime) {
int64_t timestamp = 1;
constexpr size_t kFrameSize = 500;
EncodedImage image;
@ -228,7 +230,8 @@ TEST(FrameEncodeTimerTest, NoTimingFrameIfNoEncodeStartTime) {
EXPECT_FALSE(IsTimingFrame(image));
}
TEST(FrameEncodeTimerTest, AdjustsCaptureTimeForInternalSourceEncoder) {
TEST(FrameEncodeMetadataWriterTest,
AdjustsCaptureTimeForInternalSourceEncoder) {
const int64_t kEncodeStartDelayMs = 2;
const int64_t kEncodeFinishDelayMs = 10;
constexpr size_t kFrameSize = 500;
@ -273,7 +276,7 @@ TEST(FrameEncodeTimerTest, AdjustsCaptureTimeForInternalSourceEncoder) {
1);
}
TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
TEST(FrameEncodeMetadataWriterTest, NotifiesAboutDroppedFrames) {
const int64_t kTimestampMs1 = 47721840;
const int64_t kTimestampMs2 = 47721850;
const int64_t kTimestampMs3 = 47721860;
@ -332,7 +335,7 @@ TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
EXPECT_EQ(1u, sink.GetNumFramesDropped());
}
TEST(FrameEncodeTimerTest, RestoresCaptureTimestamps) {
TEST(FrameEncodeMetadataWriterTest, RestoresCaptureTimestamps) {
EncodedImage image;
const int64_t kTimestampMs = 123456;
FakeEncodedImageCallback sink;
@ -357,7 +360,7 @@ TEST(FrameEncodeTimerTest, RestoresCaptureTimestamps) {
EXPECT_EQ(kTimestampMs, image.capture_time_ms_);
}
TEST(FrameEncodeTimerTest, CopiesRotation) {
TEST(FrameEncodeMetadataWriterTest, CopiesRotation) {
EncodedImage image;
const int64_t kTimestampMs = 123456;
FakeEncodedImageCallback sink;
@ -381,7 +384,7 @@ TEST(FrameEncodeTimerTest, CopiesRotation) {
EXPECT_EQ(kVideoRotation_180, image.rotation_);
}
TEST(FrameEncodeTimerTest, SetsContentType) {
TEST(FrameEncodeMetadataWriterTest, SetsContentType) {
EncodedImage image;
const int64_t kTimestampMs = 123456;
FakeEncodedImageCallback sink;
@ -407,7 +410,7 @@ TEST(FrameEncodeTimerTest, SetsContentType) {
EXPECT_EQ(VideoContentType::SCREENSHARE, image.content_type_);
}
TEST(FrameEncodeTimerTest, CopiesColorSpace) {
TEST(FrameEncodeMetadataWriterTest, CopiesColorSpace) {
EncodedImage image;
const int64_t kTimestampMs = 123456;
FakeEncodedImageCallback sink;
@ -434,5 +437,86 @@ TEST(FrameEncodeTimerTest, CopiesColorSpace) {
EXPECT_EQ(color_space, *image.ColorSpace());
}
// UpdateBitstream must be a no-op (nullptr result, image untouched) when no
// codec-specific info is supplied.
TEST(FrameEncodeMetadataWriterTest, DoesNotRewriteBitstreamWithoutCodecInfo) {
uint8_t buffer[] = {1, 2, 3};
EncodedImage image(buffer, sizeof(buffer), sizeof(buffer));
const RTPFragmentationHeader fragmentation;
FakeEncodedImageCallback sink;
FrameEncodeMetadataWriter encode_metadata_writer(&sink);
EXPECT_EQ(
encode_metadata_writer.UpdateBitstream(nullptr, &fragmentation, &image),
nullptr);
// The image still points at the caller's buffer; no copy was made.
EXPECT_EQ(image.data(), buffer);
EXPECT_EQ(image.size(), sizeof(buffer));
}
// Only H.264 bitstreams are rewritten; a VP8 frame must pass through
// unmodified and yield a nullptr fragmentation.
TEST(FrameEncodeMetadataWriterTest, DoesNotRewriteVp8Bitstream) {
uint8_t buffer[] = {1, 2, 3};
EncodedImage image(buffer, sizeof(buffer), sizeof(buffer));
CodecSpecificInfo codec_specific_info;
codec_specific_info.codecType = kVideoCodecVP8;
const RTPFragmentationHeader fragmentation;
FakeEncodedImageCallback sink;
FrameEncodeMetadataWriter encode_metadata_writer(&sink);
EXPECT_EQ(encode_metadata_writer.UpdateBitstream(&codec_specific_info,
&fragmentation, &image),
nullptr);
// The image still points at the caller's buffer; no copy was made.
EXPECT_EQ(image.data(), buffer);
EXPECT_EQ(image.size(), sizeof(buffer));
}
// Without a fragmentation header the rewriter cannot locate NAL units, so
// even an H.264 frame must be left untouched.
TEST(FrameEncodeMetadataWriterTest,
DoesNotRewriteH264BitstreamWithoutFragmentation) {
uint8_t buffer[] = {1, 2, 3};
EncodedImage image(buffer, sizeof(buffer), sizeof(buffer));
CodecSpecificInfo codec_specific_info;
codec_specific_info.codecType = kVideoCodecH264;
FakeEncodedImageCallback sink;
FrameEncodeMetadataWriter encode_metadata_writer(&sink);
EXPECT_EQ(encode_metadata_writer.UpdateBitstream(&codec_specific_info,
nullptr, &image),
nullptr);
// The image still points at the caller's buffer; no copy was made.
EXPECT_EQ(image.data(), buffer);
EXPECT_EQ(image.size(), sizeof(buffer));
}
// An H.264 key frame whose SPS lacks the optimal VUI must be rewritten:
// the payload becomes kRewrittenSps and the returned fragmentation reflects
// the new (longer) SPS length.
TEST(FrameEncodeMetadataWriterTest, RewritesH264BitstreamWithNonOptimalSps) {
// 4-byte Annex B start code followed by an SPS NALU without optimal VUI.
uint8_t original_sps[] = {0, 0, 0, 1, H264::NaluType::kSps,
0x00, 0x00, 0x03, 0x03, 0xF4,
0x05, 0x03, 0xC7, 0xC0};
// Same SPS after VUI rewriting (note the extra trailing bytes).
const uint8_t kRewrittenSps[] = {0, 0, 0, 1, H264::NaluType::kSps,
0x00, 0x00, 0x03, 0x03, 0xF4,
0x05, 0x03, 0xC7, 0xE0, 0x1B,
0x41, 0x10, 0x8D, 0x00};
EncodedImage image(original_sps, sizeof(original_sps), sizeof(original_sps));
image._frameType = VideoFrameType::kVideoFrameKey;
CodecSpecificInfo codec_specific_info;
codec_specific_info.codecType = kVideoCodecH264;
// Single fragment: the SPS NALU, skipping the 4-byte start code.
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 4;
fragmentation.fragmentationLength[0] = sizeof(original_sps) - 4;
FakeEncodedImageCallback sink;
FrameEncodeMetadataWriter encode_metadata_writer(&sink);
std::unique_ptr<RTPFragmentationHeader> modified_fragmentation =
encode_metadata_writer.UpdateBitstream(&codec_specific_info,
&fragmentation, &image);
ASSERT_NE(modified_fragmentation, nullptr);
EXPECT_THAT(std::vector<uint8_t>(image.data(), image.data() + image.size()),
testing::ElementsAreArray(kRewrittenSps));
ASSERT_THAT(modified_fragmentation->fragmentationVectorSize, 1U);
EXPECT_EQ(modified_fragmentation->fragmentationOffset[0], 4U);
EXPECT_EQ(modified_fragmentation->fragmentationLength[0],
sizeof(kRewrittenSps) - 4);
}
} // namespace test
} // namespace webrtc

View File

@ -1412,6 +1412,10 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
frame_encode_metadata_writer_.FillTimingInfo(spatial_idx, &image_copy);
std::unique_ptr<RTPFragmentationHeader> fragmentation_copy =
frame_encode_metadata_writer_.UpdateBitstream(codec_specific_info,
fragmentation, &image_copy);
// Piggyback ALR experiment group id and simulcast id into the content type.
const uint8_t experiment_id =
experiment_groups_[videocontenttypehelpers::IsScreenshare(
@ -1487,7 +1491,7 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
EncodedImageCallback::Result result = sink_->OnEncodedImage(
image_copy, codec_info_copy ? codec_info_copy.get() : codec_specific_info,
fragmentation);
fragmentation_copy ? fragmentation_copy.get() : fragmentation);
// We are only interested in propagating the meta-data about the image, not
// encoded data itself, to the post encode function. Since we cannot be sure

View File

@ -15,12 +15,14 @@
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video_codecs/vp8_temporal_layers.h"
#include "api/video_codecs/vp8_temporal_layers_factory.h"
#include "common_video/h264/h264_common.h"
#include "media/base/video_adapter.h"
#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
#include "modules/video_coding/utility/default_video_bitrate_allocator.h"
@ -58,6 +60,11 @@ const int kMaxInitialFramedrop = 4;
const int kDefaultFramerate = 30;
const int64_t kFrameIntervalMs = rtc::kNumMillisecsPerSec / kDefaultFramerate;
uint8_t optimal_sps[] = {0, 0, 0, 1, H264::NaluType::kSps,
0x00, 0x00, 0x03, 0x03, 0xF4,
0x05, 0x03, 0xC7, 0xE0, 0x1B,
0x41, 0x10, 0x8D, 0x00};
class TestBuffer : public webrtc::I420Buffer {
public:
TestBuffer(rtc::Event* event, int width, int height)
@ -658,6 +665,14 @@ class VideoStreamEncoderTest : public ::testing::Test {
encoded_image_callback_->OnEncodedImage(image, nullptr, nullptr);
}
void InjectEncodedImage(const EncodedImage& image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
rtc::CritScope lock(&local_crit_sect_);
encoded_image_callback_->OnEncodedImage(image, codec_specific_info,
fragmentation);
}
void ExpectNullFrame() {
rtc::CritScope lock(&local_crit_sect_);
expect_null_frame_ = true;
@ -855,6 +870,16 @@ class VideoStreamEncoderTest : public ::testing::Test {
return last_capture_time_ms_;
}
std::vector<uint8_t> GetLastEncodedImageData() {
rtc::CritScope lock(&crit_);
return std::move(last_encoded_image_data_);
}
RTPFragmentationHeader GetLastFragmentation() {
rtc::CritScope lock(&crit_);
return std::move(last_fragmentation_);
}
private:
Result OnEncodedImage(
const EncodedImage& encoded_image,
@ -862,6 +887,11 @@ class VideoStreamEncoderTest : public ::testing::Test {
const RTPFragmentationHeader* fragmentation) override {
rtc::CritScope lock(&crit_);
EXPECT_TRUE(expect_frames_);
last_encoded_image_data_ = std::vector<uint8_t>(
encoded_image.data(), encoded_image.data() + encoded_image.size());
if (fragmentation) {
last_fragmentation_.CopyFrom(*fragmentation);
}
uint32_t timestamp = encoded_image.Timestamp();
if (last_timestamp_ != timestamp) {
num_received_layers_ = 1;
@ -890,6 +920,8 @@ class VideoStreamEncoderTest : public ::testing::Test {
rtc::CriticalSection crit_;
TestEncoder* test_encoder_;
rtc::Event encoded_frame_event_;
std::vector<uint8_t> last_encoded_image_data_;
RTPFragmentationHeader last_fragmentation_;
uint32_t last_timestamp_ = 0;
int64_t last_capture_time_ms_ = 0;
uint32_t last_height_ = 0;
@ -3826,4 +3858,67 @@ TEST_F(VideoStreamEncoderTest, AdjustsTimestampInternalSource) {
video_stream_encoder_->Stop();
}
// End-to-end through VideoStreamEncoder: a key frame whose SPS already has
// the optimal VUI must reach the sink byte-for-byte unchanged, with the
// original fragmentation offsets/lengths preserved.
TEST_F(VideoStreamEncoderTest, DoesNotRewriteH264BitstreamWithOptimalSps) {
// Configure internal source factory and setup test again.
encoder_factory_.SetHasInternalSource(true);
ResetEncoder("H264", 1, 1, 1, false);
EncodedImage image(optimal_sps, sizeof(optimal_sps), sizeof(optimal_sps));
image._frameType = VideoFrameType::kVideoFrameKey;
CodecSpecificInfo codec_specific_info;
codec_specific_info.codecType = kVideoCodecH264;
// Single fragment: the SPS NALU, skipping the 4-byte start code.
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 4;
fragmentation.fragmentationLength[0] = sizeof(optimal_sps) - 4;
fake_encoder_.InjectEncodedImage(image, &codec_specific_info, &fragmentation);
EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs));
EXPECT_THAT(sink_.GetLastEncodedImageData(),
testing::ElementsAreArray(optimal_sps));
RTPFragmentationHeader last_fragmentation = sink_.GetLastFragmentation();
ASSERT_THAT(last_fragmentation.fragmentationVectorSize, 1U);
EXPECT_EQ(last_fragmentation.fragmentationOffset[0], 4U);
EXPECT_EQ(last_fragmentation.fragmentationLength[0], sizeof(optimal_sps) - 4);
video_stream_encoder_->Stop();
}
// End-to-end through VideoStreamEncoder: a key frame with a non-optimal SPS
// must arrive at the sink rewritten to the file-level |optimal_sps| bytes,
// and the delivered fragmentation must match the rewritten SPS length.
TEST_F(VideoStreamEncoderTest, RewritesH264BitstreamWithNonOptimalSps) {
// 4-byte Annex B start code followed by an SPS NALU without optimal VUI.
uint8_t original_sps[] = {0, 0, 0, 1, H264::NaluType::kSps,
0x00, 0x00, 0x03, 0x03, 0xF4,
0x05, 0x03, 0xC7, 0xC0};
// Configure internal source factory and setup test again.
encoder_factory_.SetHasInternalSource(true);
ResetEncoder("H264", 1, 1, 1, false);
EncodedImage image(original_sps, sizeof(original_sps), sizeof(original_sps));
image._frameType = VideoFrameType::kVideoFrameKey;
CodecSpecificInfo codec_specific_info;
codec_specific_info.codecType = kVideoCodecH264;
// Single fragment: the SPS NALU, skipping the 4-byte start code.
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 4;
fragmentation.fragmentationLength[0] = sizeof(original_sps) - 4;
fake_encoder_.InjectEncodedImage(image, &codec_specific_info, &fragmentation);
EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs));
EXPECT_THAT(sink_.GetLastEncodedImageData(),
testing::ElementsAreArray(optimal_sps));
RTPFragmentationHeader last_fragmentation = sink_.GetLastFragmentation();
ASSERT_THAT(last_fragmentation.fragmentationVectorSize, 1U);
EXPECT_EQ(last_fragmentation.fragmentationOffset[0], 4U);
EXPECT_EQ(last_fragmentation.fragmentationLength[0], sizeof(optimal_sps) - 4);
video_stream_encoder_->Stop();
}
} // namespace webrtc