Aggregate and log video codec metrics
Bug: b/261160916, webrtc:14852
Change-Id: Idcb7e96b12ca38af49b9b1f10d1e23cc7faac92b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/293945
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#39427}
Parent: b27efd487d
Commit: fddc9131a5
@@ -1021,6 +1021,7 @@ if (rtc_include_tests) {
    deps = [
      "../api/numerics:numerics",
      "../api/units:data_rate",
      "../api/units:data_size",
      "../api/units:frequency",
      "test/metrics:metric",
      "test/metrics:metrics_logger",
@@ -19,6 +19,7 @@
#include "api/test/metrics/metric.h"
#include "api/test/metrics/metrics_logger.h"
#include "api/units/data_rate.h"
#include "api/units/data_size.h"
#include "api/units/frequency.h"

namespace webrtc {
@@ -44,10 +45,10 @@ class VideoCodecStats {

    int width = 0;
    int height = 0;
    int size_bytes = 0;
    DataSize frame_size = DataSize::Zero();
    bool keyframe = false;
    absl::optional<int> qp = absl::nullopt;
    absl::optional<int> base_spatial_idx = absl::nullopt;
    absl::optional<int> qp;
    absl::optional<int> base_spatial_idx;

    Timestamp encode_start = Timestamp::Zero();
    TimeDelta encode_time = TimeDelta::Zero();
@@ -59,29 +60,35 @@ class VideoCodecStats {
      double u = 0.0;
      double v = 0.0;
    };
    absl::optional<Psnr> psnr = absl::nullopt;
    absl::optional<Psnr> psnr;

    absl::optional<DataRate> target_bitrate;
    absl::optional<Frequency> target_framerate;

    bool encoded = false;
    bool decoded = false;
  };

  struct Stream {
    int num_frames = 0;
    int num_keyframes = 0;

    SamplesStatsCounter width;
    SamplesStatsCounter height;
    SamplesStatsCounter size_bytes;
    SamplesStatsCounter frame_size_bytes;
    SamplesStatsCounter keyframe;
    SamplesStatsCounter qp;

    SamplesStatsCounter encode_time_us;
    SamplesStatsCounter decode_time_us;
    SamplesStatsCounter encode_time_ms;
    SamplesStatsCounter decode_time_ms;

    DataRate bitrate = DataRate::Zero();
    Frequency framerate = Frequency::Zero();
    int bitrate_mismatch_pct = 0;
    int framerate_mismatch_pct = 0;
    SamplesStatsCounter transmission_time_us;
    SamplesStatsCounter target_bitrate_kbps;
    SamplesStatsCounter target_framerate_fps;

    SamplesStatsCounter encoded_bitrate_kbps;
    SamplesStatsCounter encoded_framerate_fps;

    SamplesStatsCounter bitrate_mismatch_pct;
    SamplesStatsCounter framerate_mismatch_pct;

    SamplesStatsCounter transmission_time_ms;

    struct Psnr {
      SamplesStatsCounter y;
@@ -97,13 +104,8 @@ class VideoCodecStats {
  virtual std::vector<Frame> Slice(
      absl::optional<Filter> filter = absl::nullopt) const = 0;

  // Returns video statistics aggregated for given `frames`. If `bitrate` is
  // provided, also performs rate control analysis. If `framerate` is provided,
  // also calculates frame rate mismatch.
  virtual Stream Aggregate(
      const std::vector<Frame>& frames,
      absl::optional<DataRate> bitrate = absl::nullopt,
      absl::optional<Frequency> framerate = absl::nullopt) const = 0;
  // Returns video statistics aggregated for given `frames`.
  virtual Stream Aggregate(const std::vector<Frame>& frames) const = 0;

  // Logs `Stream` metrics to provided `MetricsLogger`.
  virtual void LogMetrics(MetricsLogger* logger,
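For orientation, a minimal sketch of how the reworked single-argument Aggregate() is driven, modeled on the EncodeDecodeTest hunk near the end of this diff: target rates are attached to the sliced frames up front, and rate-control analysis then happens inside Aggregate(). The names `stats`, `frame_settings`, `first_frame`, `last_frame`, and `test_case_name` are stand-ins borrowed from that test, not part of this header.

    // Sketch only: exercising the new VideoCodecStats API.
    std::vector<VideoCodecStats::Frame> frames = stats->Slice(
        VideoCodecStats::Filter{.first_frame = first_frame,
                                .last_frame = last_frame});
    SetTargetRates(frame_settings, frames);  // fill target_bitrate/target_framerate
    VideoCodecStats::Stream stream = stats->Aggregate(frames);
    stats->LogMetrics(GetGlobalMetricsLogger(), stream, test_case_name);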
@@ -1060,6 +1060,7 @@ if (rtc_include_tests) {
      "../../api:video_codec_tester_api",
      "../../api:videocodec_test_fixture_api",
      "../../api:videocodec_test_stats_api",
      "../../api/test/metrics:global_metrics_logger_and_exporter",
      "../../api/test/video:function_video_factory",
      "../../api/units:data_rate",
      "../../api/units:frequency",
@@ -69,11 +69,10 @@ void VideoCodecAnalyzer::StartEncode(const VideoFrame& input_frame) {

    RTC_CHECK(frame_num_.find(timestamp_rtp) == frame_num_.end());
    frame_num_[timestamp_rtp] = num_frames_++;
    int frame_num = frame_num_[timestamp_rtp];

    VideoCodecStats::Frame* fs =
        stats_.AddFrame(frame_num, timestamp_rtp, /*spatial_idx=*/0);
    fs->encode_start = Timestamp::Micros(encode_start_us);
    stats_.AddFrame({.frame_num = frame_num_[timestamp_rtp],
                     .timestamp_rtp = timestamp_rtp,
                     .encode_start = Timestamp::Micros(encode_start_us)});
  });
}
@@ -86,16 +85,18 @@ void VideoCodecAnalyzer::FinishEncode(const EncodedImage& frame) {
                        width = frame._encodedWidth,
                        height = frame._encodedHeight,
                        frame_type = frame._frameType,
                        size_bytes = frame.size(), qp = frame.qp_,
                        frame_size_bytes = frame.size(), qp = frame.qp_,
                        encode_finished_us]() {
    RTC_DCHECK_RUN_ON(&sequence_checker_);

    if (spatial_idx > 0) {
      VideoCodecStats::Frame* fs0 =
      VideoCodecStats::Frame* base_frame =
          stats_.GetFrame(timestamp_rtp, /*spatial_idx=*/0);
      VideoCodecStats::Frame* fs =
          stats_.AddFrame(fs0->frame_num, timestamp_rtp, spatial_idx);
      fs->encode_start = fs0->encode_start;

      stats_.AddFrame({.frame_num = base_frame->frame_num,
                       .timestamp_rtp = timestamp_rtp,
                       .spatial_idx = spatial_idx,
                       .encode_start = base_frame->encode_start});
    }

    VideoCodecStats::Frame* fs = stats_.GetFrame(timestamp_rtp, spatial_idx);
@@ -103,7 +104,7 @@ void VideoCodecAnalyzer::FinishEncode(const EncodedImage& frame) {
    fs->temporal_idx = temporal_idx;
    fs->width = width;
    fs->height = height;
    fs->size_bytes = static_cast<int>(size_bytes);
    fs->frame_size = DataSize::Bytes(frame_size_bytes);
    fs->qp = qp;
    fs->keyframe = frame_type == VideoFrameType::kVideoFrameKey;
    fs->encode_time = Timestamp::Micros(encode_finished_us) - fs->encode_start;
@@ -115,7 +116,7 @@ void VideoCodecAnalyzer::StartDecode(const EncodedImage& frame) {
  int64_t decode_start_us = rtc::TimeMicros();
  task_queue_.PostTask([this, timestamp_rtp = frame.Timestamp(),
                        spatial_idx = frame.SpatialIndex().value_or(0),
                        size_bytes = frame.size(), decode_start_us]() {
                        frame_size_bytes = frame.size(), decode_start_us]() {
    RTC_DCHECK_RUN_ON(&sequence_checker_);

    VideoCodecStats::Frame* fs = stats_.GetFrame(timestamp_rtp, spatial_idx);
@@ -123,11 +124,11 @@ void VideoCodecAnalyzer::StartDecode(const EncodedImage& frame) {
      if (frame_num_.find(timestamp_rtp) == frame_num_.end()) {
        frame_num_[timestamp_rtp] = num_frames_++;
      }
      int frame_num = frame_num_[timestamp_rtp];

      fs = stats_.AddFrame(frame_num, timestamp_rtp, spatial_idx);
      fs->spatial_idx = spatial_idx;
      fs->size_bytes = size_bytes;
      stats_.AddFrame({.frame_num = frame_num_[timestamp_rtp],
                       .timestamp_rtp = timestamp_rtp,
                       .spatial_idx = spatial_idx,
                       .frame_size = DataSize::Bytes(frame_size_bytes)});
      fs = stats_.GetFrame(timestamp_rtp, spatial_idx);
    }

    fs->decode_start = Timestamp::Micros(decode_start_us);
@@ -15,6 +15,7 @@
#include "api/numerics/samples_stats_counter.h"
#include "api/test/metrics/global_metrics_logger_and_exporter.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"

namespace webrtc {
namespace test {
@@ -23,6 +24,112 @@ using Frame = VideoCodecStats::Frame;
using Stream = VideoCodecStats::Stream;

constexpr Frequency k90kHz = Frequency::Hertz(90000);

class LeakyBucket {
 public:
  LeakyBucket() : level_bits_(0) {}

  // Updates bucket level and returns its current level in bits. Data is
  // removed from bucket with rate equal to target bitrate of previous frame.
  // Bucket level is tracked with floating point precision. Returned value of
  // bucket level is rounded up.
  int Update(const Frame& frame) {
    RTC_CHECK(frame.target_bitrate) << "Bitrate must be specified.";

    if (prev_frame_) {
      RTC_CHECK_GT(frame.timestamp_rtp, prev_frame_->timestamp_rtp)
          << "Timestamp must increase.";
      TimeDelta passed =
          (frame.timestamp_rtp - prev_frame_->timestamp_rtp) / k90kHz;
      level_bits_ -=
          prev_frame_->target_bitrate->bps() * passed.us() / 1000000.0;
      level_bits_ = std::max(level_bits_, 0.0);
    }

    prev_frame_ = frame;

    level_bits_ += frame.frame_size.bytes() * 8;
    return static_cast<int>(std::ceil(level_bits_));
  }

 private:
  absl::optional<Frame> prev_frame_;
  double level_bits_;
};

// Merges spatial layer frames into superframes.
std::vector<Frame> Merge(const std::vector<Frame>& frames) {
  std::vector<Frame> superframes;
  // Map from frame timestamp to index in `superframes` vector.
  std::map<uint32_t, int> index;

  for (const auto& f : frames) {
    if (index.find(f.timestamp_rtp) == index.end()) {
      index[f.timestamp_rtp] = static_cast<int>(superframes.size());
      superframes.push_back(f);
      continue;
    }

    Frame& sf = superframes[index[f.timestamp_rtp]];

    sf.width = std::max(sf.width, f.width);
    sf.height = std::max(sf.height, f.height);
    sf.frame_size += f.frame_size;
    sf.keyframe |= f.keyframe;

    sf.encode_time = std::max(sf.encode_time, f.encode_time);
    sf.decode_time = std::max(sf.decode_time, f.decode_time);

    if (f.spatial_idx > sf.spatial_idx) {
      if (f.qp) {
        sf.qp = f.qp;
      }
      if (f.psnr) {
        sf.psnr = f.psnr;
      }
    }

    sf.spatial_idx = std::max(sf.spatial_idx, f.spatial_idx);
    sf.temporal_idx = std::max(sf.temporal_idx, f.temporal_idx);

    sf.encoded |= f.encoded;
    sf.decoded |= f.decoded;
  }

  return superframes;
}

Timestamp RtpToTime(uint32_t timestamp_rtp) {
  return Timestamp::Micros((timestamp_rtp / k90kHz).us());
}

SamplesStatsCounter::StatsSample StatsSample(double value, Timestamp time) {
  return SamplesStatsCounter::StatsSample{value, time};
}

TimeDelta CalcTotalDuration(const std::vector<Frame>& frames) {
  RTC_CHECK(!frames.empty());
  TimeDelta duration = TimeDelta::Zero();
  if (frames.size() > 1) {
    duration +=
        (frames.rbegin()->timestamp_rtp - frames.begin()->timestamp_rtp) /
        k90kHz;
  }

  // Add last frame duration. If target frame rate is provided, calculate frame
  // duration from it. Otherwise, assume duration of last frame is the same as
  // duration of preceding frame.
  if (frames.rbegin()->target_framerate) {
    duration += 1 / *frames.rbegin()->target_framerate;
  } else {
    RTC_CHECK_GT(frames.size(), 1u);
    duration += (frames.rbegin()->timestamp_rtp -
                 std::next(frames.rbegin())->timestamp_rtp) /
                k90kHz;
  }

  return duration;
}
}  // namespace

std::vector<Frame> VideoCodecStatsImpl::Slice(
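To make the leaky-bucket arithmetic concrete: the AggregateTransmissionTime unit test further down feeds two frames of 2 and 3 bytes, one second apart (RTP timestamps 0 and 90000 at the 90 kHz clock), with a 1 byte/s (8 bps) target. The bucket holds 2*8 = 16 bits after the first frame, drains 8 bits over the following second, and holds 16 - 8 + 3*8 = 32 bits after the second frame, so the derived transmission times are 16/8 = 2 s and 32/8 = 4 s, matching the 2000 ms and 4000 ms the test expects.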
@@ -51,36 +158,99 @@ std::vector<Frame> VideoCodecStatsImpl::Slice(
  return frames;
}

Stream VideoCodecStatsImpl::Aggregate(
    const std::vector<Frame>& frames,
    absl::optional<DataRate> bitrate,
    absl::optional<Frequency> framerate) const {
Stream VideoCodecStatsImpl::Aggregate(const std::vector<Frame>& frames) const {
  std::vector<Frame> superframes = Merge(frames);
  RTC_CHECK(!superframes.empty());

  LeakyBucket leacky_bucket;
  Stream stream;
  stream.num_frames = static_cast<int>(superframes.size());
  for (size_t i = 0; i < superframes.size(); ++i) {
    Frame& f = superframes[i];
    Timestamp time = RtpToTime(f.timestamp_rtp);

  for (const auto& f : superframes) {
    Timestamp time = Timestamp::Micros((f.timestamp_rtp / k90kHz).us());
    // TODO(webrtc:14852): Add AddSample(double value, Timestamp time) method to
    // SamplesStatsCounter.
    stream.decode_time_us.AddSample(SamplesStatsCounter::StatsSample(
        {.value = static_cast<double>(f.decode_time.us()), .time = time}));
    if (!f.frame_size.IsZero()) {
      stream.width.AddSample(StatsSample(f.width, time));
      stream.height.AddSample(StatsSample(f.height, time));
      stream.frame_size_bytes.AddSample(
          StatsSample(f.frame_size.bytes(), time));
      stream.keyframe.AddSample(StatsSample(f.keyframe, time));
      if (f.qp) {
        stream.qp.AddSample(StatsSample(*f.qp, time));
      }
    }

    if (f.encoded) {
      stream.encode_time_ms.AddSample(StatsSample(f.encode_time.ms(), time));
    }

    if (f.decoded) {
      stream.decode_time_ms.AddSample(StatsSample(f.decode_time.ms(), time));
    }

    if (f.psnr) {
      stream.psnr.y.AddSample(
          SamplesStatsCounter::StatsSample({.value = f.psnr->y, .time = time}));
      stream.psnr.u.AddSample(
          SamplesStatsCounter::StatsSample({.value = f.psnr->u, .time = time}));
      stream.psnr.v.AddSample(
          SamplesStatsCounter::StatsSample({.value = f.psnr->v, .time = time}));
      stream.psnr.y.AddSample(StatsSample(f.psnr->y, time));
      stream.psnr.u.AddSample(StatsSample(f.psnr->u, time));
      stream.psnr.v.AddSample(StatsSample(f.psnr->v, time));
    }

    if (f.keyframe) {
      ++stream.num_keyframes;
    if (f.target_framerate) {
      stream.target_framerate_fps.AddSample(
          StatsSample(f.target_framerate->millihertz() / 1000.0, time));
    }

    // TODO(webrtc:14852): Aggregate other metrics.
    if (f.target_bitrate) {
      stream.target_bitrate_kbps.AddSample(
          StatsSample(f.target_bitrate->bps() / 1000.0, time));

      int buffer_level_bits = leacky_bucket.Update(f);
      stream.transmission_time_ms.AddSample(
          StatsSample(buffer_level_bits * rtc::kNumMillisecsPerSec /
                          f.target_bitrate->bps(),
                      RtpToTime(f.timestamp_rtp)));
    }
  }

  TimeDelta duration = CalcTotalDuration(superframes);
  DataRate encoded_bitrate =
      DataSize::Bytes(stream.frame_size_bytes.GetSum()) / duration;

  int num_encoded_frames = stream.frame_size_bytes.NumSamples();
  Frequency encoded_framerate = num_encoded_frames / duration;

  absl::optional<double> bitrate_mismatch_pct;
  if (auto target_bitrate = superframes.begin()->target_bitrate;
      target_bitrate) {
    bitrate_mismatch_pct = 100.0 *
                           (encoded_bitrate.bps() - target_bitrate->bps()) /
                           target_bitrate->bps();
  }

  absl::optional<double> framerate_mismatch_pct;
  if (auto target_framerate = superframes.begin()->target_framerate;
      target_framerate) {
    framerate_mismatch_pct =
        100.0 *
        (encoded_framerate.millihertz() - target_framerate->millihertz()) /
        target_framerate->millihertz();
  }

  for (auto& f : superframes) {
    Timestamp time = RtpToTime(f.timestamp_rtp);
    stream.encoded_bitrate_kbps.AddSample(
        StatsSample(encoded_bitrate.bps() / 1000.0, time));

    stream.encoded_framerate_fps.AddSample(
        StatsSample(encoded_framerate.millihertz() / 1000.0, time));

    if (bitrate_mismatch_pct) {
      stream.bitrate_mismatch_pct.AddSample(
          StatsSample(*bitrate_mismatch_pct, time));
    }

    if (framerate_mismatch_pct) {
      stream.framerate_mismatch_pct.AddSample(
          StatsSample(*framerate_mismatch_pct, time));
    }
  }

  return stream;
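The two rate figures and their mismatch percentages can be checked against the unit tests added below. In AggregateBitrate, 1000 + 2000 bytes are encoded over a total duration of 2 s (the frames are one second apart and the last frame is assumed to last as long as the preceding interval), giving 1500 bytes/s = 12 kbps against an 8 kbps (1000 bytes/s) target, i.e. a +50% bitrate mismatch. In AggregateFramerate, only one of the two frames has a non-zero size, so one encoded frame over 2 s yields 0.5 fps against a 1 fps target, a -50% framerate mismatch.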
@@ -91,94 +261,82 @@ void VideoCodecStatsImpl::LogMetrics(MetricsLogger* logger,
                                     std::string test_case_name) const {
  logger->LogMetric("width", test_case_name, stream.width, Unit::kCount,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);
  // TODO(webrtc:14852): Log other metrics.

  logger->LogMetric("height", test_case_name, stream.height, Unit::kCount,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);

  logger->LogMetric("frame_size_bytes", test_case_name, stream.frame_size_bytes,
                    Unit::kBytes,
                    webrtc::test::ImprovementDirection::kNeitherIsBetter);

  logger->LogMetric("keyframe", test_case_name, stream.keyframe, Unit::kCount,
                    webrtc::test::ImprovementDirection::kSmallerIsBetter);

  logger->LogMetric("qp", test_case_name, stream.qp, Unit::kUnitless,
                    webrtc::test::ImprovementDirection::kSmallerIsBetter);

  logger->LogMetric("encode_time_ms", test_case_name, stream.encode_time_ms,
                    Unit::kMilliseconds,
                    webrtc::test::ImprovementDirection::kSmallerIsBetter);

  logger->LogMetric("decode_time_ms", test_case_name, stream.decode_time_ms,
                    Unit::kMilliseconds,
                    webrtc::test::ImprovementDirection::kSmallerIsBetter);

  logger->LogMetric("target_bitrate_kbps", test_case_name,
                    stream.target_bitrate_kbps, Unit::kKilobitsPerSecond,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);

  logger->LogMetric("target_framerate_fps", test_case_name,
                    stream.target_framerate_fps, Unit::kHertz,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);

  logger->LogMetric("encoded_bitrate_kbps", test_case_name,
                    stream.encoded_bitrate_kbps, Unit::kKilobitsPerSecond,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);

  logger->LogMetric("encoded_framerate_fps", test_case_name,
                    stream.encoded_framerate_fps, Unit::kHertz,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);

  logger->LogMetric("bitrate_mismatch_pct", test_case_name,
                    stream.bitrate_mismatch_pct, Unit::kPercent,
                    webrtc::test::ImprovementDirection::kSmallerIsBetter);

  logger->LogMetric("framerate_mismatch_pct", test_case_name,
                    stream.framerate_mismatch_pct, Unit::kPercent,
                    webrtc::test::ImprovementDirection::kSmallerIsBetter);

  logger->LogMetric("transmission_time_ms", test_case_name,
                    stream.transmission_time_ms, Unit::kMilliseconds,
                    webrtc::test::ImprovementDirection::kSmallerIsBetter);

  logger->LogMetric("psnr_y_db", test_case_name, stream.psnr.y, Unit::kUnitless,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);

  logger->LogMetric("psnr_u_db", test_case_name, stream.psnr.u, Unit::kUnitless,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);

  logger->LogMetric("psnr_v_db", test_case_name, stream.psnr.v, Unit::kUnitless,
                    webrtc::test::ImprovementDirection::kBiggerIsBetter);
}

Frame* VideoCodecStatsImpl::AddFrame(int frame_num,
                                     uint32_t timestamp_rtp,
                                     int spatial_idx) {
  Frame frame;
  frame.frame_num = frame_num;
  frame.timestamp_rtp = timestamp_rtp;
  frame.spatial_idx = spatial_idx;

  FrameId frame_id;
  frame_id.frame_num = frame_num;
  frame_id.spatial_idx = spatial_idx;

void VideoCodecStatsImpl::AddFrame(const Frame& frame) {
  FrameId frame_id{.timestamp_rtp = frame.timestamp_rtp,
                   .spatial_idx = frame.spatial_idx};
  RTC_CHECK(frames_.find(frame_id) == frames_.end())
      << "Frame with frame_num=" << frame_num
      << " and spatial_idx=" << spatial_idx << " already exists";
      << "Frame with timestamp_rtp=" << frame.timestamp_rtp
      << " and spatial_idx=" << frame.spatial_idx << " already exists";

  frames_[frame_id] = frame;

  if (frame_num_.find(timestamp_rtp) == frame_num_.end()) {
    frame_num_[timestamp_rtp] = frame_num;
  }

  return &frames_[frame_id];
}

Frame* VideoCodecStatsImpl::GetFrame(uint32_t timestamp_rtp, int spatial_idx) {
  if (frame_num_.find(timestamp_rtp) == frame_num_.end()) {
    return nullptr;
  }

  FrameId frame_id;
  frame_id.frame_num = frame_num_[timestamp_rtp];
  frame_id.spatial_idx = spatial_idx;

  FrameId frame_id{.timestamp_rtp = timestamp_rtp, .spatial_idx = spatial_idx};
  if (frames_.find(frame_id) == frames_.end()) {
    return nullptr;
  }

  return &frames_[frame_id];
}

std::vector<Frame> VideoCodecStatsImpl::Merge(
    const std::vector<Frame>& frames) const {
  std::vector<Frame> superframes;
  // Map from frame_num to index in `superframes` vector.
  std::map<int, int> index;

  for (const auto& f : frames) {
    if (f.encoded == false && f.decoded == false) {
      continue;
    }

    if (index.find(f.frame_num) == index.end()) {
      index[f.frame_num] = static_cast<int>(superframes.size());
      superframes.push_back(f);
      continue;
    }

    Frame& sf = superframes[index[f.frame_num]];

    sf.width = std::max(sf.width, f.width);
    sf.height = std::max(sf.height, f.height);
    sf.size_bytes += f.size_bytes;
    sf.keyframe |= f.keyframe;

    sf.encode_time = std::max(sf.encode_time, f.encode_time);
    sf.decode_time += f.decode_time;

    if (f.spatial_idx > sf.spatial_idx) {
      if (f.qp) {
        sf.qp = f.qp;
      }
      if (f.psnr) {
        sf.psnr = f.psnr;
      }
    }

    sf.spatial_idx = std::max(sf.spatial_idx, f.spatial_idx);
    sf.temporal_idx = std::max(sf.temporal_idx, f.temporal_idx);

    sf.encoded |= f.encoded;
    sf.decoded |= f.decoded;
  }

  return superframes;
  return &frames_.find(frame_id)->second;
}

}  // namespace test
@@ -27,47 +27,36 @@ class VideoCodecStatsImpl : public VideoCodecStats {
  std::vector<Frame> Slice(
      absl::optional<Filter> filter = absl::nullopt) const override;

  Stream Aggregate(
      const std::vector<Frame>& frames,
      absl::optional<DataRate> bitrate = absl::nullopt,
      absl::optional<Frequency> framerate = absl::nullopt) const override;
  Stream Aggregate(const std::vector<Frame>& frames) const override;

  void LogMetrics(MetricsLogger* logger,
                  const Stream& stream,
                  std::string test_case_name) const override;

  // Creates new frame, caches it and returns raw pointer to it.
  Frame* AddFrame(int frame_num, uint32_t timestamp_rtp, int spatial_idx);
  void AddFrame(const Frame& frame);

  // Returns raw pointers to requested frame. If frame does not exist, returns
  // `nullptr`.
  // Returns raw pointers to previously added frame. If frame does not exist,
  // returns `nullptr`.
  Frame* GetFrame(uint32_t timestamp_rtp, int spatial_idx);

 private:
  struct FrameId {
    int frame_num;
    uint32_t timestamp_rtp;
    int spatial_idx;

    bool operator==(const FrameId& o) const {
      return frame_num == o.frame_num && spatial_idx == o.spatial_idx;
      return timestamp_rtp == o.timestamp_rtp && spatial_idx == o.spatial_idx;
    }

    bool operator<(const FrameId& o) const {
      if (frame_num < o.frame_num)
      if (timestamp_rtp < o.timestamp_rtp)
        return true;
      if (spatial_idx < o.spatial_idx)
      if (timestamp_rtp == o.timestamp_rtp && spatial_idx < o.spatial_idx)
        return true;
      return false;
    }
  };

  // Merges frame stats from different spatial layers and returns vector of
  // superframes.
  std::vector<Frame> Merge(const std::vector<Frame>& frames) const;

  // Map from RTP timestamp to frame number (`Frame::frame_num`).
  std::map<uint32_t, int> frame_num_;

  std::map<FrameId, Frame> frames_;
};
@@ -10,6 +10,8 @@

#include "modules/video_coding/codecs/test/video_codec_stats_impl.h"

#include <tuple>

#include "absl/types/optional.h"
#include "test/gmock.h"
#include "test/gtest.h"
@@ -20,121 +22,127 @@ namespace test {
namespace {
using ::testing::Return;
using ::testing::Values;
using Filter = VideoCodecStats::Filter;
using Frame = VideoCodecStatsImpl::Frame;
using Stream = VideoCodecStats::Stream;
}  // namespace

TEST(VideoCodecStatsImpl, AddFrame) {
TEST(VideoCodecStatsImpl, AddAndGetFrame) {
  VideoCodecStatsImpl stats;
  VideoCodecStatsImpl::Frame* fs =
      stats.AddFrame(/*frame_num=*/0, /*timestamp_rtp=*/0, /*spatial_idx=*/0);
  EXPECT_NE(nullptr, fs);
  fs = stats.GetFrame(/*timestamp_rtp=*/0, /*spatial_idx=*/0);
  EXPECT_NE(nullptr, fs);
  stats.AddFrame({.timestamp_rtp = 0, .spatial_idx = 0});
  stats.AddFrame({.timestamp_rtp = 0, .spatial_idx = 1});
  stats.AddFrame({.timestamp_rtp = 1, .spatial_idx = 0});

  Frame* fs = stats.GetFrame(/*timestamp_rtp=*/0, /*spatial_idx=*/0);
  ASSERT_NE(fs, nullptr);
  EXPECT_EQ(fs->timestamp_rtp, 0u);
  EXPECT_EQ(fs->spatial_idx, 0);

  fs = stats.GetFrame(/*timestamp_rtp=*/0, /*spatial_idx=*/1);
  ASSERT_NE(fs, nullptr);
  EXPECT_EQ(fs->timestamp_rtp, 0u);
  EXPECT_EQ(fs->spatial_idx, 1);

  fs = stats.GetFrame(/*timestamp_rtp=*/1, /*spatial_idx=*/0);
  ASSERT_NE(fs, nullptr);
  EXPECT_EQ(fs->timestamp_rtp, 1u);
  EXPECT_EQ(fs->spatial_idx, 0);

  fs = stats.GetFrame(/*timestamp_rtp=*/1, /*spatial_idx=*/1);
  EXPECT_EQ(fs, nullptr);
}

TEST(VideoCodecStatsImpl, GetFrame) {
class VideoCodecStatsImplSlicingTest
    : public ::testing::TestWithParam<std::tuple<Filter, std::vector<int>>> {};

TEST_P(VideoCodecStatsImplSlicingTest, Slice) {
  Filter filter = std::get<0>(GetParam());
  std::vector<int> expected_frames = std::get<1>(GetParam());
  std::vector<VideoCodecStats::Frame> frames = {
      {.frame_num = 0, .timestamp_rtp = 0, .spatial_idx = 0, .temporal_idx = 0},
      {.frame_num = 0, .timestamp_rtp = 0, .spatial_idx = 1, .temporal_idx = 0},
      {.frame_num = 1, .timestamp_rtp = 1, .spatial_idx = 0, .temporal_idx = 1},
      {.frame_num = 1,
       .timestamp_rtp = 1,
       .spatial_idx = 1,
       .temporal_idx = 1}};

  VideoCodecStatsImpl stats;
  stats.AddFrame(/*frame_num=*/0, /*timestamp_rtp=*/0, /*spatial_idx=*/0);
  VideoCodecStatsImpl::Frame* fs =
      stats.GetFrame(/*timestamp_rtp=*/0, /*spatial_idx=*/0);
  EXPECT_NE(nullptr, fs);
}
  stats.AddFrame(frames[0]);
  stats.AddFrame(frames[1]);
  stats.AddFrame(frames[2]);
  stats.AddFrame(frames[3]);

struct VideoCodecStatsSlicingTestParams {
  VideoCodecStats::Filter slicer;
  std::vector<VideoCodecStats::Frame> expected;
};

class VideoCodecStatsSlicingTest
    : public ::testing::TestWithParam<VideoCodecStatsSlicingTestParams> {
 public:
  void SetUp() {
    // TODO(ssikin): Hard codec 2x2 table would be better.
    for (int frame_num = 0; frame_num < 2; ++frame_num) {
      for (int spatial_idx = 0; spatial_idx < 2; ++spatial_idx) {
        uint32_t timestamp_rtp = 3000 * frame_num;
        VideoCodecStats::Frame* f =
            stats_.AddFrame(frame_num, timestamp_rtp, spatial_idx);
        f->temporal_idx = frame_num;
      }
    }
  std::vector<VideoCodecStats::Frame> slice = stats.Slice(filter);
  ASSERT_EQ(slice.size(), expected_frames.size());
  for (size_t i = 0; i < expected_frames.size(); ++i) {
    Frame& expected = frames[expected_frames[i]];
    EXPECT_EQ(slice[i].frame_num, expected.frame_num);
    EXPECT_EQ(slice[i].timestamp_rtp, expected.timestamp_rtp);
    EXPECT_EQ(slice[i].spatial_idx, expected.spatial_idx);
    EXPECT_EQ(slice[i].temporal_idx, expected.temporal_idx);
  }

 protected:
  VideoCodecStatsImpl stats_;
};

TEST_P(VideoCodecStatsSlicingTest, Slice) {
  VideoCodecStatsSlicingTestParams test_params = GetParam();
  std::vector<VideoCodecStats::Frame> frames = stats_.Slice(test_params.slicer);
  EXPECT_EQ(frames.size(), test_params.expected.size());
}

INSTANTIATE_TEST_SUITE_P(All,
                         VideoCodecStatsSlicingTest,
                         ::testing::ValuesIn({VideoCodecStatsSlicingTestParams(
                             {.slicer = {.first_frame = 0, .last_frame = 1},
                              .expected = {{.frame_num = 0},
                                           {.frame_num = 1},
                                           {.frame_num = 0},
                                           {.frame_num = 1}}})}));

struct VideoCodecStatsAggregationTestParams {
  VideoCodecStats::Filter slicer;
  struct Expected {
    double decode_time_us;
  } expected;
};

class VideoCodecStatsAggregationTest
    : public ::testing::TestWithParam<VideoCodecStatsAggregationTestParams> {
 public:
  void SetUp() {
    // TODO(ssikin): Hard codec 2x2 table would be better. Share with
    // VideoCodecStatsSlicingTest
    for (int frame_num = 0; frame_num < 2; ++frame_num) {
      for (int spatial_idx = 0; spatial_idx < 2; ++spatial_idx) {
        uint32_t timestamp_rtp = 3000 * frame_num;
        VideoCodecStats::Frame* f =
            stats_.AddFrame(frame_num, timestamp_rtp, spatial_idx);
        f->temporal_idx = frame_num;
        f->decode_time = TimeDelta::Micros(spatial_idx * 10 + frame_num);
        f->encoded = true;
        f->decoded = true;
      }
    }
  }

 protected:
  VideoCodecStatsImpl stats_;
};

TEST_P(VideoCodecStatsAggregationTest, Aggregate) {
  VideoCodecStatsAggregationTestParams test_params = GetParam();
  std::vector<VideoCodecStats::Frame> frames = stats_.Slice(test_params.slicer);
  VideoCodecStats::Stream stream = stats_.Aggregate(frames);
  EXPECT_EQ(stream.decode_time_us.GetAverage(),
            test_params.expected.decode_time_us);
}

INSTANTIATE_TEST_SUITE_P(
    All,
    VideoCodecStatsAggregationTest,
    ::testing::ValuesIn(
        {VideoCodecStatsAggregationTestParams(
             {.slicer = {},
              .expected = {.decode_time_us = (0.0 + 1.0 + 10.0 + 11.0) / 2}}),
         // Slicing on frame number
         VideoCodecStatsAggregationTestParams(
             {.slicer = {.first_frame = 1, .last_frame = 1},
              .expected = {.decode_time_us = 1.0 + 11.0}}),
         // Slice on spatial index
         VideoCodecStatsAggregationTestParams(
             {.slicer = {.spatial_idx = 1},
              .expected = {.decode_time_us = (10.0 + 11.0) / 2}}),
         // Slice on temporal index
         VideoCodecStatsAggregationTestParams(
             {.slicer = {.temporal_idx = 0},
              .expected = {.decode_time_us = 0.0 + 10.0}})}));
    VideoCodecStatsImplSlicingTest,
    ::testing::Values(
        std::make_tuple(Filter{}, std::vector<int>{0, 1, 2, 3}),
        std::make_tuple(Filter{.first_frame = 1}, std::vector<int>{2, 3}),
        std::make_tuple(Filter{.last_frame = 0}, std::vector<int>{0, 1}),
        std::make_tuple(Filter{.spatial_idx = 0}, std::vector<int>{0, 2}),
        std::make_tuple(Filter{.temporal_idx = 1},
                        std::vector<int>{0, 1, 2, 3})));

TEST(VideoCodecStatsImpl, AggregateBitrate) {
  std::vector<VideoCodecStats::Frame> frames = {
      {.frame_num = 0,
       .timestamp_rtp = 0,
       .frame_size = DataSize::Bytes(1000),
       .target_bitrate = DataRate::BytesPerSec(1000)},
      {.frame_num = 1,
       .timestamp_rtp = 90000,
       .frame_size = DataSize::Bytes(2000),
       .target_bitrate = DataRate::BytesPerSec(1000)}};

  Stream stream = VideoCodecStatsImpl().Aggregate(frames);
  EXPECT_EQ(stream.encoded_bitrate_kbps.GetAverage(), 12.0);
  EXPECT_EQ(stream.bitrate_mismatch_pct.GetAverage(), 50.0);
}

TEST(VideoCodecStatsImpl, AggregateFramerate) {
  std::vector<VideoCodecStats::Frame> frames = {
      {.frame_num = 0,
       .timestamp_rtp = 0,
       .frame_size = DataSize::Bytes(1),
       .target_framerate = Frequency::Hertz(1)},
      {.frame_num = 1,
       .timestamp_rtp = 90000,
       .frame_size = DataSize::Zero(),
       .target_framerate = Frequency::Hertz(1)}};

  Stream stream = VideoCodecStatsImpl().Aggregate(frames);
  EXPECT_EQ(stream.encoded_framerate_fps.GetAverage(), 0.5);
  EXPECT_EQ(stream.framerate_mismatch_pct.GetAverage(), -50.0);
}

TEST(VideoCodecStatsImpl, AggregateTransmissionTime) {
  std::vector<VideoCodecStats::Frame> frames = {
      {.frame_num = 0,
       .timestamp_rtp = 0,
       .frame_size = DataSize::Bytes(2),
       .target_bitrate = DataRate::BytesPerSec(1)},
      {.frame_num = 1,
       .timestamp_rtp = 90000,
       .frame_size = DataSize::Bytes(3),
       .target_bitrate = DataRate::BytesPerSec(1)}};

  Stream stream = VideoCodecStatsImpl().Aggregate(frames);
  ASSERT_EQ(stream.transmission_time_ms.NumSamples(), 2);
  ASSERT_EQ(stream.transmission_time_ms.GetSamples()[0], 2000);
  ASSERT_EQ(stream.transmission_time_ms.GetSamples()[1], 4000);
}

}  // namespace test
}  // namespace webrtc
@@ -17,6 +17,7 @@

#include "absl/functional/any_invocable.h"
#include "api/test/create_video_codec_tester.h"
#include "api/test/metrics/global_metrics_logger_and_exporter.h"
#include "api/test/videocodec_test_stats.h"
#include "api/units/data_rate.h"
#include "api/units/frequency.h"
@@ -84,6 +85,14 @@ struct EncodingSettings {
  Frequency framerate;
  // Bitrate of spatial and temporal layers.
  std::map<LayerId, DataRate> bitrate;

  std::string ToString() const {
    return std::string(ScalabilityModeToString(scalability_mode)) + "_" +
           std::to_string(resolution.begin()->second.width) + "x" +
           std::to_string(resolution.begin()->second.height) + "_" +
           std::to_string(framerate.hertz()) + "fps" + "_" +
           std::to_string(bitrate.begin()->second.kbps()) + "kbps";
  }
};

struct EncodingTestSettings {
@@ -116,7 +125,7 @@ const EncodingSettings kQvga64Kbps30Fps = {
     {{.spatial_idx = 0, .temporal_idx = 0}, DataRate::KilobitsPerSec(64)}}};

const EncodingTestSettings kConstantRateQvga64Kbps30Fps = {
    .name = "ConstantRateQvga64Kbps30Fps",
    .name = "ConstantRate",
    .num_frames = 300,
    .frame_settings = {{/*frame_num=*/0, kQvga64Kbps30Fps}}};
@@ -409,6 +418,18 @@ std::unique_ptr<VideoCodecTester::Decoder> CreateDecoder(
  return std::make_unique<TestDecoder>(std::move(decoder), codec_info);
}

void SetTargetRates(const std::map<int, EncodingSettings>& frame_settings,
                    std::vector<VideoCodecStats::Frame>& frames) {
  for (VideoCodecStats::Frame& f : frames) {
    const EncodingSettings& settings =
        std::prev(frame_settings.upper_bound(f.frame_num))->second;
    LayerId layer_id = {.spatial_idx = f.spatial_idx,
                        .temporal_idx = f.temporal_idx};
    f.target_bitrate = settings.bitrate.at(layer_id);
    f.target_framerate = settings.framerate / (1 << f.temporal_idx);
  }
}

}  // namespace

class EncodeDecodeTest
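SetTargetRates attaches the targets that Aggregate() later compares against: for each frame it picks the last entry of `frame_settings` whose key does not exceed `frame_num`, then takes the per-layer bitrate and divides the stream framerate by 2^temporal_idx. As a concrete reading of the last line, a frame tagged temporal_idx = 1 in a 30 fps configuration would get a 15 fps target.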
@@ -435,7 +456,7 @@ class EncodeDecodeTest
      const ::testing::TestParamInfo<EncodeDecodeTest::ParamType>& info) {
    return std::string(info.param.encoding_settings.name +
                       info.param.codec.type + info.param.codec.encoder +
                       info.param.codec.decoder + info.param.video.name);
                       info.param.codec.decoder);
  }

 protected:
@@ -460,9 +481,16 @@ TEST_P(EncodeDecodeTest, DISABLED_TestEncodeDecode) {
    VideoCodecStats::Filter slicer = {.first_frame = first_frame,
                                      .last_frame = last_frame};
    std::vector<VideoCodecStats::Frame> frames = stats->Slice(slicer);
    SetTargetRates(frame_settings, frames);
    VideoCodecStats::Stream stream = stats->Aggregate(frames);
    EXPECT_GE(stream.psnr.y.GetAverage(),
              test_params_.test_expectations.min_apsnr_y);

    stats->LogMetrics(
        GetGlobalMetricsLogger(), stream,
        std::string(
            ::testing::UnitTest::GetInstance()->current_test_info()->name()) +
            "_" + fs->second.ToString());
  }
}