Add a helper that generates filter data given a captured frame and an encoded frame
R=sprang@webrtc.org Bug: b/358039777 Change-Id: I48400db23b836d45f03cfa151aa1a9f1b8f00b2a Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/359940 Reviewed-by: Erik Språng <sprang@webrtc.org> Commit-Queue: Fanny Linderborg <linderborg@webrtc.org> Cr-Commit-Position: refs/heads/main@{#42807}
This commit is contained in:
parent
52e46247bf
commit
a6186b2485
@ -8,6 +8,28 @@
|
|||||||
|
|
||||||
import("../../webrtc.gni")
|
import("../../webrtc.gni")
|
||||||
|
|
||||||
|
# Generates frame-instrumentation (corruption-detection filter) data from
# pairs of captured and encoded frames.
rtc_library("frame_instrumentation_generator") {
  sources = [
    "frame_instrumentation_generator.cc",
    "frame_instrumentation_generator.h",
  ]
  deps = [
    ":generic_mapping_functions",
    ":halton_frame_sampler",
    "../../api:scoped_refptr",
    "../../api/video:encoded_image",
    "../../api/video:video_frame",
    "../../api/video:video_frame_type",
    "../../api/video_codecs:video_codecs_api",
    "../../modules:module_api_public",
    "../../modules/video_coding:video_coding_utility",
    "../../rtc_base:logging",
    "//third_party/abseil-cpp/absl/algorithm:container",
    "//third_party/abseil-cpp/absl/types:optional",
    "//third_party/abseil-cpp/absl/types:variant",
  ]
}
|
||||||
|
|
||||||
rtc_library("generic_mapping_functions") {
|
rtc_library("generic_mapping_functions") {
|
||||||
sources = [
|
sources = [
|
||||||
"generic_mapping_functions.cc",
|
"generic_mapping_functions.cc",
|
||||||
@ -44,6 +66,21 @@ rtc_library("halton_sequence") {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (rtc_include_tests) {
|
if (rtc_include_tests) {
|
||||||
|
  # Unit tests for :frame_instrumentation_generator.
  rtc_library("frame_instrumentation_generator_unittest") {
    testonly = true
    sources = [ "frame_instrumentation_generator_unittest.cc" ]
    deps = [
      ":frame_instrumentation_generator",
      "../../api:scoped_refptr",
      "../../api/video:encoded_image",
      "../../api/video:video_frame",
      "../../api/video:video_frame_type",
      "../../test:test_support",
      "//third_party/abseil-cpp/absl/types:optional",
      "//third_party/abseil-cpp/absl/types:variant",
    ]
  }
|
||||||
|
|
||||||
rtc_library("generic_mapping_functions_unittest") {
|
rtc_library("generic_mapping_functions_unittest") {
|
||||||
testonly = true
|
testonly = true
|
||||||
sources = [ "generic_mapping_functions_unittest.cc" ]
|
sources = [ "generic_mapping_functions_unittest.cc" ]
|
||||||
@ -78,6 +115,7 @@ if (rtc_include_tests) {
|
|||||||
testonly = true
|
testonly = true
|
||||||
sources = []
|
sources = []
|
||||||
deps = [
|
deps = [
|
||||||
|
":frame_instrumentation_generator_unittest",
|
||||||
":generic_mapping_functions_unittest",
|
":generic_mapping_functions_unittest",
|
||||||
":halton_frame_sampler_unittest",
|
":halton_frame_sampler_unittest",
|
||||||
":halton_sequence_unittest",
|
":halton_sequence_unittest",
|
||||||
|
|||||||
169
video/corruption_detection/frame_instrumentation_generator.cc
Normal file
169
video/corruption_detection/frame_instrumentation_generator.cc
Normal file
@ -0,0 +1,169 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2024 The WebRTC project authors. All rights reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "video/corruption_detection/frame_instrumentation_generator.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <iterator>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "absl/algorithm/container.h"
|
||||||
|
#include "absl/types/optional.h"
|
||||||
|
#include "absl/types/variant.h"
|
||||||
|
#include "api/scoped_refptr.h"
|
||||||
|
#include "api/video/encoded_image.h"
|
||||||
|
#include "api/video/video_codec_type.h"
|
||||||
|
#include "api/video/video_frame.h"
|
||||||
|
#include "api/video/video_frame_buffer.h"
|
||||||
|
#include "api/video/video_frame_type.h"
|
||||||
|
#include "api/video_codecs/video_codec.h"
|
||||||
|
#include "modules/include/module_common_types_public.h"
|
||||||
|
#include "modules/video_coding/utility/qp_parser.h"
|
||||||
|
#include "rtc_base/logging.h"
|
||||||
|
#include "video/corruption_detection/generic_mapping_functions.h"
|
||||||
|
#include "video/corruption_detection/halton_frame_sampler.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Returns the corruption-detection filter settings for `encoded_image`, or
// absl::nullopt when no QP value can be determined.
//
// The QP is taken from the encoded image when the encoder reported it;
// otherwise it is parsed from the encoded bitstream for the given codec and
// layer. The (QP, codec) pair is then mapped to filter settings via the
// overload from generic_mapping_functions.h.
absl::optional<FilterSettings> GetCorruptionFilterSettings(
    const EncodedImage& encoded_image,
    VideoCodecType video_codec_type,
    int layer_id) {
  /* TODO: b/358039777 - Uncomment when parameters are available in EncodedImage
  if (encoded_image.CorruptionDetectionParameters()) {
    return FilterSettings{
        .std_dev = encoded_image.CorruptionDetectionParameters()->std_dev,
        .luma_error_threshold =
            encoded_image.CorruptionDetectionParameters()->luma_error_threshold,
        .chroma_error_threshold = encoded_image.CorruptionDetectionParameters()
                                      ->chroma_error_threshold};
  }
  */

  int qp = encoded_image.qp_;
  // qp_ == -1 means the encoder did not report a QP; try to parse it from the
  // bitstream instead.
  if (qp == -1) {
    absl::optional<uint32_t> parsed_qp = QpParser().Parse(
        video_codec_type, layer_id, encoded_image.data(), encoded_image.size());
    if (!parsed_qp.has_value()) {
      RTC_LOG(LS_VERBOSE) << "Missing QP for "
                          << CodecTypeToPayloadString(video_codec_type)
                          << " layer " << layer_id << ".";
      return absl::nullopt;
    }
    qp = *parsed_qp;
  }

  return GetCorruptionFilterSettings(qp, video_codec_type);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Creates a generator for a stream encoded with `video_codec_type`. The codec
// type is needed later to parse QP values and map them to filter settings.
FrameInstrumentationGenerator::FrameInstrumentationGenerator(
    VideoCodecType video_codec_type)
    : video_codec_type_(video_codec_type) {}
|
||||||
|
|
||||||
|
// Queues a captured frame so it can later be matched (by RTP timestamp)
// against encoded images in OnEncodedImage().
void FrameInstrumentationGenerator::OnCapturedFrame(VideoFrame frame) {
  // `frame` is taken by value; move it into the queue instead of copying.
  // (VideoFrame copies are shallow/refcounted but still not free.)
  captured_frames_.push(std::move(frame));
}
|
||||||
|
|
||||||
|
// Matches `encoded_image` against the queued captured frames and, if the
// frame should be sampled, returns either full instrumentation data (filter
// parameters plus filtered sample values) or sync-only data (for key frames
// that are not sampled). Returns absl::nullopt when no matching captured
// frame exists, the layer has not yet seen a key frame, the QP cannot be
// determined, or the frame is simply not due for sampling.
absl::optional<
    absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
FrameInstrumentationGenerator::OnEncodedImage(
    const EncodedImage& encoded_image) {
  uint32_t rtp_timestamp_encoded_image = encoded_image.RtpTimestamp();
  // Drop queued captured frames that are older than this encoded image; they
  // can never be matched by this or any later encoded image.
  while (!captured_frames_.empty() &&
         IsNewerTimestamp(rtp_timestamp_encoded_image,
                          captured_frames_.front().rtp_timestamp())) {
    captured_frames_.pop();
  }
  if (captured_frames_.empty() ||
      captured_frames_.front().rtp_timestamp() != rtp_timestamp_encoded_image) {
    // Fix: this is an RTP timestamp, not "RTC".
    RTC_LOG(LS_VERBOSE) << "No captured frames for RTP timestamp "
                        << rtp_timestamp_encoded_image << ".";
    return absl::nullopt;
  }
  // Intentionally leave the matched frame in the queue: other spatial or
  // simulcast layers with the same timestamp still need it.
  VideoFrame captured_frame = captured_frames_.front();

  int layer_id = std::max(encoded_image.SpatialIndex().value_or(0),
                          encoded_image.SimulcastIndex().value_or(0));
  bool is_key_frame =
      encoded_image.FrameType() == VideoFrameType::kVideoFrameKey;
  if (is_key_frame) {
    // A key frame resets the sampling context for its layer.
    contexts_.erase(layer_id);
  } else {
    // A delta frame sharing a timestamp with a layer's last key frame is
    // really part of that (SVC) key frame.
    for (const auto& [unused, context] : contexts_) {
      if (context.rtp_timestamp_of_last_key_frame ==
          rtp_timestamp_encoded_image) {
        // Upper layer of an SVC key frame.
        is_key_frame = true;
        break;
      }
    }
  }

  if (is_key_frame) {
    contexts_[layer_id].rtp_timestamp_of_last_key_frame =
        encoded_image.RtpTimestamp();
  } else if (contexts_.find(layer_id) == contexts_.end()) {
    RTC_LOG(LS_INFO) << "The first frame of a spatial or simulcast layer is "
                        "not a key frame.";
    return absl::nullopt;
  }

  int sequence_index = contexts_[layer_id].frame_sampler.GetCurrentIndex();
  // TODO: b/358039777 - Maybe allow other sample sizes as well
  std::vector<HaltonFrameSampler::Coordinates> sample_coordinates =
      contexts_[layer_id]
          .frame_sampler.GetSampleCoordinatesForFrameIfFrameShouldBeSampled(
              is_key_frame, captured_frame.rtp_timestamp(),
              /*sample_size=*/13);
  if (sample_coordinates.empty()) {
    if (!is_key_frame) {
      // Delta frame not due for sampling: nothing to emit.
      return absl::nullopt;
    }
    // Unsampled key frames still emit sync data so the receiver can keep its
    // sequence index aligned.
    return FrameInstrumentationSyncData{.sequence_index = sequence_index,
                                        .is_key_frame = true};
  }

  absl::optional<FilterSettings> filter_settings =
      GetCorruptionFilterSettings(encoded_image, video_codec_type_, layer_id);
  if (!filter_settings.has_value()) {
    return absl::nullopt;
  }

  // Sampling operates on I420 data; convert if the captured frame uses some
  // other buffer type.
  scoped_refptr<I420BufferInterface> captured_frame_buffer_as_i420 =
      captured_frame.video_frame_buffer()->ToI420();
  if (!captured_frame_buffer_as_i420) {
    RTC_LOG(LS_ERROR) << "Failed to convert "
                      << VideoFrameBufferTypeToString(
                             captured_frame.video_frame_buffer()->type())
                      << " image to I420.";
    return absl::nullopt;
  }

  FrameInstrumentationData data = {
      .sequence_index = sequence_index,
      .is_key_frame = is_key_frame,
      .std_dev = filter_settings->std_dev,
      .luma_error_threshold = filter_settings->luma_error_threshold,
      .chroma_error_threshold = filter_settings->chroma_error_threshold};
  std::vector<FilteredSample> samples = GetSampleValuesForFrame(
      captured_frame_buffer_as_i420, sample_coordinates,
      encoded_image._encodedWidth, encoded_image._encodedHeight,
      filter_settings->std_dev);
  data.sample_values.reserve(samples.size());
  absl::c_transform(samples, std::back_inserter(data.sample_values),
                    [](const FilteredSample& sample) { return sample.value; });
  return data;
}
|
||||||
|
|
||||||
|
} // namespace webrtc
|
||||||
74
video/corruption_detection/frame_instrumentation_generator.h
Normal file
74
video/corruption_detection/frame_instrumentation_generator.h
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2024 The WebRTC project authors. All rights reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef VIDEO_CORRUPTION_DETECTION_FRAME_INSTRUMENTATION_GENERATOR_H_
|
||||||
|
#define VIDEO_CORRUPTION_DETECTION_FRAME_INSTRUMENTATION_GENERATOR_H_
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <map>
|
||||||
|
#include <queue>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "absl/types/optional.h"
|
||||||
|
#include "absl/types/variant.h"
|
||||||
|
#include "api/video/encoded_image.h"
|
||||||
|
#include "api/video/video_codec_type.h"
|
||||||
|
#include "api/video/video_frame.h"
|
||||||
|
#include "video/corruption_detection/halton_frame_sampler.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
|
||||||
|
// TODO: b/358039777 - Error handling: negative values etc.
// Sync-only instrumentation: communicates the sampling sequence index without
// any sample values (emitted for key frames that are not sampled).
struct FrameInstrumentationSyncData {
  // Current index into the Halton sampling sequence.
  int sequence_index;
  // True when the associated frame is (part of) a key frame.
  bool is_key_frame;
};
|
||||||
|
|
||||||
|
// Full instrumentation data for a sampled frame: filter parameters plus the
// filtered sample values drawn from the captured frame.
struct FrameInstrumentationData {
  // Current index into the Halton sampling sequence.
  int sequence_index;
  // True when the associated frame is (part of) a key frame.
  bool is_key_frame;
  // Standard deviation of the blur filter applied before sampling.
  double std_dev;
  // Allowed per-sample error for luma values.
  int luma_error_threshold;
  // Allowed per-sample error for chroma values.
  int chroma_error_threshold;
  // Filtered sample values, in sampling order.
  std::vector<double> sample_values;
};
|
||||||
|
|
||||||
|
// Produces frame-instrumentation data for corruption detection by pairing
// captured frames with their encoded counterparts (matched on RTP timestamp).
// Not thread-safe; callers must serialize access.
class FrameInstrumentationGenerator {
 public:
  FrameInstrumentationGenerator() = delete;
  explicit FrameInstrumentationGenerator(VideoCodecType video_codec_type);

  FrameInstrumentationGenerator(const FrameInstrumentationGenerator&) = delete;
  FrameInstrumentationGenerator& operator=(
      const FrameInstrumentationGenerator&) = delete;

  ~FrameInstrumentationGenerator() = default;

  // Queues a captured frame for later matching against encoded images.
  void OnCapturedFrame(VideoFrame frame);
  // Returns instrumentation data (full or sync-only) for `encoded_image`, or
  // absl::nullopt when the frame cannot or should not be sampled.
  absl::optional<
      absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
  OnEncodedImage(const EncodedImage& encoded_image);

 private:
  // Per-layer sampling state.
  struct Context {
    HaltonFrameSampler frame_sampler;
    uint32_t rtp_timestamp_of_last_key_frame = 0;
  };

  // Incoming video frames in capture order.
  std::queue<VideoFrame> captured_frames_;
  // Map from spatial or simulcast index to sampling context.
  std::map<int, Context> contexts_;
  const VideoCodecType video_codec_type_;
};
|
||||||
|
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // VIDEO_CORRUPTION_DETECTION_FRAME_INSTRUMENTATION_GENERATOR_H_
|
||||||
@ -0,0 +1,393 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2024 The WebRTC project authors. All rights reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "video/corruption_detection/frame_instrumentation_generator.h"
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "absl/types/optional.h"
|
||||||
|
#include "absl/types/variant.h"
|
||||||
|
#include "api/scoped_refptr.h"
|
||||||
|
#include "api/video/encoded_image.h"
|
||||||
|
#include "api/video/i420_buffer.h"
|
||||||
|
#include "api/video/video_codec_type.h"
|
||||||
|
#include "api/video/video_frame.h"
|
||||||
|
#include "api/video/video_frame_type.h"
|
||||||
|
#include "test/gtest.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Encoded dimensions used by all tests below.
constexpr int kDefaultScaledWidth = 4;
constexpr int kDefaultScaledHeight = 4;

// Builds a uniform 4x4 I420 buffer (every plane filled with pixel value 30)
// used as the default captured-frame content in these tests.
scoped_refptr<I420Buffer> MakeDefaultI420FrameBuffer() {
  // Create an I420 frame of size 4x4.
  const int kDefaultLumaWidth = 4;
  const int kDefaultLumaHeight = 4;
  const int kDefaultChromaWidth = 2;
  const int kDefaultPixelValue = 30;
  std::vector<uint8_t> kDefaultYContent(16, kDefaultPixelValue);
  std::vector<uint8_t> kDefaultUContent(4, kDefaultPixelValue);
  std::vector<uint8_t> kDefaultVContent(4, kDefaultPixelValue);

  return I420Buffer::Copy(kDefaultLumaWidth, kDefaultLumaHeight,
                          kDefaultYContent.data(), kDefaultLumaWidth,
                          kDefaultUContent.data(), kDefaultChromaWidth,
                          kDefaultVContent.data(), kDefaultChromaWidth);
}
||||||
|
|
||||||
|
// Without any queued captured frames, an encoded image yields no data.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsNothingWhenNoFramesHaveBeenProvided) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecGeneric);

  EXPECT_FALSE(generator.OnEncodedImage(EncodedImage()).has_value());
}
|
||||||
|
|
||||||
|
// A queued captured frame whose RTP timestamp differs from the encoded
// image's must not be matched.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsNothingWhenNoFrameWithTheSameTimestampIsProvided) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecGeneric);
  VideoFrame frame = VideoFrame::Builder()
                         .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                         .set_rtp_timestamp(1)
                         .build();
  EncodedImage encoded_image;
  encoded_image.SetRtpTimestamp(2);

  generator.OnCapturedFrame(frame);

  EXPECT_FALSE(generator.OnEncodedImage(encoded_image).has_value());
}
|
||||||
|
|
||||||
|
// A layer whose first observed frame is a delta frame produces no data: the
// generator requires a key frame to establish the layer's sampling context.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsNothingWhenTheFirstFrameOfASpatialOrSimulcastLayerIsNotAKeyFrame) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecGeneric);
  VideoFrame frame = VideoFrame::Builder()
                         .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                         .set_rtp_timestamp(1)
                         .build();

  // Delta frame with no preceding key frame.
  EncodedImage encoded_image;
  encoded_image.SetRtpTimestamp(1);
  encoded_image.SetFrameType(VideoFrameType::kVideoFrameDelta);
  encoded_image.SetSpatialIndex(0);
  encoded_image.SetSimulcastIndex(0);

  generator.OnCapturedFrame(frame);

  // The first frame of a spatial or simulcast layer is not a key frame.
  EXPECT_FALSE(generator.OnEncodedImage(encoded_image).has_value());
}
|
||||||
|
|
||||||
|
// When the encoder does not report a QP and it cannot be parsed from the
// (empty) encoded payload, no filter settings — and hence no data — exist.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsNothingWhenQpIsUnsetAndNotParseable) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecGeneric);
  VideoFrame frame = VideoFrame::Builder()
                         .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                         .set_rtp_timestamp(1)
                         .build();

  // Frame where QP is unset and QP is not parseable from the encoded data.
  EncodedImage encoded_image;
  encoded_image.SetRtpTimestamp(1);
  encoded_image.SetFrameType(VideoFrameType::kVideoFrameKey);

  generator.OnCapturedFrame(frame);

  EXPECT_FALSE(generator.OnEncodedImage(encoded_image).has_value());
}
|
||||||
|
|
||||||
|
#if GTEST_HAS_DEATH_TEST
|
||||||
|
// The generic codec has no QP-to-filter-settings mapping, so reaching the
// mapping with a valid QP is a programming error and is expected to crash.
TEST(FrameInstrumentationGeneratorTest, FailsWhenCodecIsUnsupported) {
  // No available mapping from codec to filter parameters.
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecGeneric);
  VideoFrame frame = VideoFrame::Builder()
                         .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                         .set_rtp_timestamp(1)
                         .build();
  EncodedImage encoded_image;
  encoded_image.SetRtpTimestamp(1);
  encoded_image.SetFrameType(VideoFrameType::kVideoFrameKey);
  encoded_image.qp_ = 10;

  generator.OnCapturedFrame(frame);

  EXPECT_DEATH(generator.OnEncodedImage(encoded_image),
               "Codec type Generic is not supported");
}
|
||||||
|
#endif // GTEST_HAS_DEATH_TEST
|
||||||
|
|
||||||
|
// Happy path: a VP8 key frame with a reported QP yields full instrumentation
// data with non-trivial filter parameters and sample values.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsInstrumentationDataForVP8KeyFrameWithQpSet) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecVP8);
  VideoFrame frame = VideoFrame::Builder()
                         .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                         .set_rtp_timestamp(1)
                         .build();
  // VP8 key frame with QP set.
  EncodedImage encoded_image;
  encoded_image.SetRtpTimestamp(1);
  encoded_image.SetFrameType(VideoFrameType::kVideoFrameKey);
  encoded_image.qp_ = 10;
  encoded_image._encodedWidth = kDefaultScaledWidth;
  encoded_image._encodedHeight = kDefaultScaledHeight;

  generator.OnCapturedFrame(frame);
  absl::optional<
      absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
      data = generator.OnEncodedImage(encoded_image);

  ASSERT_TRUE(data.has_value());
  ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data));
  FrameInstrumentationData frame_instrumentation_data =
      absl::get<FrameInstrumentationData>(*data);
  EXPECT_EQ(frame_instrumentation_data.sequence_index, 0);
  EXPECT_TRUE(frame_instrumentation_data.is_key_frame);
  EXPECT_NE(frame_instrumentation_data.std_dev, 0.0);
  EXPECT_NE(frame_instrumentation_data.luma_error_threshold, 0);
  EXPECT_NE(frame_instrumentation_data.chroma_error_threshold, 0);
  EXPECT_FALSE(frame_instrumentation_data.sample_values.empty());
}
|
||||||
|
|
||||||
|
// When qp_ is unset, the QP parsed from the actual VP8 bitstream is used and
// full instrumentation data is still produced.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsInstrumentationDataWhenQpIsParseable) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecVP8);
  VideoFrame frame = VideoFrame::Builder()
                         .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                         .set_rtp_timestamp(1)
                         .build();

  // VP8 key frame with parseable QP.
  constexpr uint8_t kCodedFrameVp8Qp25[] = {
      0x10, 0x02, 0x00, 0x9d, 0x01, 0x2a, 0x10, 0x00, 0x10, 0x00,
      0x02, 0x47, 0x08, 0x85, 0x85, 0x88, 0x85, 0x84, 0x88, 0x0c,
      0x82, 0x00, 0x0c, 0x0d, 0x60, 0x00, 0xfe, 0xfc, 0x5c, 0xd0};
  scoped_refptr<EncodedImageBuffer> encoded_image_buffer =
      EncodedImageBuffer::Create(kCodedFrameVp8Qp25,
                                 sizeof(kCodedFrameVp8Qp25));
  EncodedImage encoded_image;
  encoded_image.SetRtpTimestamp(1);
  encoded_image.SetFrameType(VideoFrameType::kVideoFrameKey);
  encoded_image.SetEncodedData(encoded_image_buffer);
  encoded_image._encodedWidth = kDefaultScaledWidth;
  encoded_image._encodedHeight = kDefaultScaledHeight;

  generator.OnCapturedFrame(frame);
  absl::optional<
      absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
      data = generator.OnEncodedImage(encoded_image);

  ASSERT_TRUE(data.has_value());
  ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data));
  FrameInstrumentationData frame_instrumentation_data =
      absl::get<FrameInstrumentationData>(*data);
  EXPECT_EQ(frame_instrumentation_data.sequence_index, 0);
  EXPECT_TRUE(frame_instrumentation_data.is_key_frame);
  EXPECT_NE(frame_instrumentation_data.std_dev, 0.0);
  EXPECT_NE(frame_instrumentation_data.luma_error_threshold, 0);
  EXPECT_NE(frame_instrumentation_data.chroma_error_threshold, 0);
  EXPECT_FALSE(frame_instrumentation_data.sample_values.empty());
}
|
||||||
|
|
||||||
|
// A delta frame sharing the RTP timestamp of a lower-layer key frame is an
// upper SVC layer of that key frame, and is treated as a key frame itself.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsInstrumentationDataForUpperLayerOfAnSvcKeyFrame) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecVP9);
  VideoFrame frame = VideoFrame::Builder()
                         .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                         .set_rtp_timestamp(1)
                         .build();
  EncodedImage encoded_image1;
  encoded_image1.SetRtpTimestamp(1);
  encoded_image1.SetFrameType(VideoFrameType::kVideoFrameKey);
  encoded_image1.SetSpatialIndex(0);
  encoded_image1.qp_ = 10;
  encoded_image1._encodedWidth = kDefaultScaledWidth;
  encoded_image1._encodedHeight = kDefaultScaledHeight;

  // Delta frame that is an upper layer of an SVC key frame.
  EncodedImage encoded_image2;
  encoded_image2.SetRtpTimestamp(1);
  encoded_image2.SetFrameType(VideoFrameType::kVideoFrameDelta);
  encoded_image2.SetSpatialIndex(1);
  encoded_image2.qp_ = 10;
  encoded_image2._encodedWidth = kDefaultScaledWidth;
  encoded_image2._encodedHeight = kDefaultScaledHeight;

  generator.OnCapturedFrame(frame);
  generator.OnEncodedImage(encoded_image1);
  absl::optional<
      absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
      data = generator.OnEncodedImage(encoded_image2);

  ASSERT_TRUE(data.has_value());
  ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data));
  FrameInstrumentationData frame_instrumentation_data =
      absl::get<FrameInstrumentationData>(*data);
  EXPECT_EQ(frame_instrumentation_data.sequence_index, 0);
  EXPECT_TRUE(frame_instrumentation_data.is_key_frame);
  EXPECT_NE(frame_instrumentation_data.std_dev, 0.0);
  EXPECT_NE(frame_instrumentation_data.luma_error_threshold, 0);
  EXPECT_NE(frame_instrumentation_data.chroma_error_threshold, 0);
  EXPECT_FALSE(frame_instrumentation_data.sample_values.empty());
}
|
||||||
|
|
||||||
|
// A delta frame too close in time to the last sampled frame is skipped by the
// sampler and produces no data.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsNothingWhenNotEnoughTimeHasPassedSinceLastSampledFrame) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecVP8);
  VideoFrame frame1 = VideoFrame::Builder()
                          .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                          .set_rtp_timestamp(1)
                          .build();
  VideoFrame frame2 = VideoFrame::Builder()
                          .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                          .set_rtp_timestamp(2)
                          .build();
  EncodedImage encoded_image1;
  encoded_image1.SetRtpTimestamp(1);
  encoded_image1.SetFrameType(VideoFrameType::kVideoFrameKey);
  encoded_image1.SetSpatialIndex(0);
  encoded_image1.qp_ = 10;
  encoded_image1._encodedWidth = kDefaultScaledWidth;
  encoded_image1._encodedHeight = kDefaultScaledHeight;

  // Delta frame that is too recent in comparison to the last sampled frame:
  // passed time < 90'000.
  EncodedImage encoded_image2;
  encoded_image2.SetRtpTimestamp(2);
  encoded_image2.SetFrameType(VideoFrameType::kVideoFrameDelta);
  encoded_image2.SetSpatialIndex(0);
  encoded_image2.qp_ = 10;
  encoded_image2._encodedWidth = kDefaultScaledWidth;
  encoded_image2._encodedHeight = kDefaultScaledHeight;

  generator.OnCapturedFrame(frame1);
  generator.OnCapturedFrame(frame2);
  generator.OnEncodedImage(encoded_image1);

  ASSERT_FALSE(generator.OnEncodedImage(encoded_image2).has_value());
}
|
||||||
|
|
||||||
|
// The SVC upper-layer key-frame treatment keeps working for subsequent key
// frames, not just the first one.
TEST(FrameInstrumentationGeneratorTest,
     ReturnsInstrumentationDataForUpperLayerOfASecondSvcKeyFrame) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecVP9);
  VideoFrame frame1 = VideoFrame::Builder()
                          .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                          .set_rtp_timestamp(1)
                          .build();
  VideoFrame frame2 = VideoFrame::Builder()
                          .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                          .set_rtp_timestamp(2)
                          .build();
  for (const VideoFrame& frame : {frame1, frame2}) {
    // Base-layer key frame for this timestamp.
    EncodedImage encoded_image1;
    encoded_image1.SetRtpTimestamp(frame.rtp_timestamp());
    encoded_image1.SetFrameType(VideoFrameType::kVideoFrameKey);
    encoded_image1.SetSpatialIndex(0);
    encoded_image1.qp_ = 10;
    encoded_image1._encodedWidth = kDefaultScaledWidth;
    encoded_image1._encodedHeight = kDefaultScaledHeight;

    // Upper-layer delta frame with the same timestamp.
    EncodedImage encoded_image2;
    encoded_image2.SetRtpTimestamp(frame.rtp_timestamp());
    encoded_image2.SetFrameType(VideoFrameType::kVideoFrameDelta);
    encoded_image2.SetSpatialIndex(1);
    encoded_image2.qp_ = 10;
    encoded_image2._encodedWidth = kDefaultScaledWidth;
    encoded_image2._encodedHeight = kDefaultScaledHeight;

    generator.OnCapturedFrame(frame1);
    generator.OnCapturedFrame(frame2);

    absl::optional<
        absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
        data1 = generator.OnEncodedImage(encoded_image1);

    absl::optional<
        absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
        data2 = generator.OnEncodedImage(encoded_image2);

    ASSERT_TRUE(data1.has_value());
    ASSERT_TRUE(data2.has_value());
    ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data1));

    ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data2));

    EXPECT_TRUE(absl::get<FrameInstrumentationData>(*data1).is_key_frame);
    EXPECT_TRUE(absl::get<FrameInstrumentationData>(*data2).is_key_frame);
  }
}
|
||||||
|
|
||||||
|
// Over a long enough run, both simulcast layers eventually produce
// instrumentation data for delta frames (not only for the initial key frame).
TEST(FrameInstrumentationGeneratorTest,
     OutputsDeltaFrameInstrumentationDataForSimulcast) {
  FrameInstrumentationGenerator generator(VideoCodecType::kVideoCodecVP9);
  bool has_found_delta_frame = false;
  // 34 frames is the minimum number of frames to be able to sample a delta
  // frame.
  for (int i = 0; i < 34; ++i) {
    VideoFrame frame = VideoFrame::Builder()
                           .set_video_frame_buffer(MakeDefaultI420FrameBuffer())
                           .set_rtp_timestamp(i)
                           .build();
    EncodedImage encoded_image1;
    encoded_image1.SetRtpTimestamp(frame.rtp_timestamp());
    encoded_image1.SetFrameType(i == 0 ? VideoFrameType::kVideoFrameKey
                                       : VideoFrameType::kVideoFrameDelta);
    encoded_image1.SetSimulcastIndex(0);
    encoded_image1.qp_ = 10;
    encoded_image1._encodedWidth = kDefaultScaledWidth;
    encoded_image1._encodedHeight = kDefaultScaledHeight;

    EncodedImage encoded_image2;
    encoded_image2.SetRtpTimestamp(frame.rtp_timestamp());
    encoded_image2.SetFrameType(i == 0 ? VideoFrameType::kVideoFrameKey
                                       : VideoFrameType::kVideoFrameDelta);
    encoded_image2.SetSimulcastIndex(1);
    encoded_image2.qp_ = 10;
    encoded_image2._encodedWidth = kDefaultScaledWidth;
    encoded_image2._encodedHeight = kDefaultScaledHeight;

    generator.OnCapturedFrame(frame);

    absl::optional<
        absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
        data1 = generator.OnEncodedImage(encoded_image1);

    absl::optional<
        absl::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>
        data2 = generator.OnEncodedImage(encoded_image2);

    if (i == 0) {
      // The initial key frame must produce data on both layers.
      ASSERT_TRUE(data1.has_value());
      ASSERT_TRUE(data2.has_value());
      ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data1));

      ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data2));

      EXPECT_TRUE(absl::get<FrameInstrumentationData>(*data1).is_key_frame);
      EXPECT_TRUE(absl::get<FrameInstrumentationData>(*data2).is_key_frame);
    } else if (data1.has_value() || data2.has_value()) {
      // Any later output must be marked as delta-frame data.
      if (data1.has_value()) {
        ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data1));
        EXPECT_FALSE(absl::get<FrameInstrumentationData>(*data1).is_key_frame);
      }
      if (data2.has_value()) {
        ASSERT_TRUE(absl::holds_alternative<FrameInstrumentationData>(*data2));
        EXPECT_FALSE(absl::get<FrameInstrumentationData>(*data2).is_key_frame);
      }
      has_found_delta_frame = true;
    }
  }
  EXPECT_TRUE(has_found_delta_frame);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
} // namespace webrtc
|
||||||
Loading…
x
Reference in New Issue
Block a user