diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn index 1bb0675a35..02ef00d4ff 100644 --- a/modules/video_coding/BUILD.gn +++ b/modules/video_coding/BUILD.gn @@ -556,6 +556,7 @@ if (rtc_include_tests) { "../../rtc_base:rtc_task_queue", "../../rtc_base:sequenced_task_checker", "../../system_wrappers", + "../../test:test_common", "../../test:test_support", "../../test:video_test_common", "../../test:video_test_support", @@ -569,6 +570,7 @@ if (rtc_include_tests) { "../../resources/foreman_160x120.yuv", "../../resources/foreman_176x144.yuv", "../../resources/foreman_320x240.yuv", + "../../resources/FourPeople_1280x720_30.yuv", ] if (is_ios || is_mac) { @@ -622,7 +624,6 @@ if (rtc_include_tests) { "../../rtc_base:rtc_base_tests_utils", "../../system_wrappers", "../../test:field_trial", - "../../test:test_common", "../../test:test_support", "../../test:video_test_common", "../../test:video_test_support", diff --git a/modules/video_coding/codecs/test/plot_webrtc_test_logs.py b/modules/video_coding/codecs/test/plot_webrtc_test_logs.py index 97ac56145e..22e415778d 100755 --- a/modules/video_coding/codecs/test/plot_webrtc_test_logs.py +++ b/modules/video_coding/codecs/test/plot_webrtc_test_logs.py @@ -23,32 +23,33 @@ EVENT_START = \ EVENT_END = 'OK ] CodecSettings/VideoProcessorIntegrationTestParameterized.' # Metrics to plot, tuple: (name to parse in file, label to use when plotting). -BITRATE = ('Target bitrate', 'target bitrate (kbps)') -FRAMERATE = ('Target framerate', 'fps') -WIDTH = ('Width', 'width') -HEIGHT = ('Height', 'height') -FILENAME = ('Filename', 'clip') -CODEC_TYPE = ('Codec type', 'Codec') -ENCODER_IMPLEMENTATION_NAME = ('Encoder implementation name', 'enc name') -DECODER_IMPLEMENTATION_NAME = ('Decoder implementation name', 'dec name') -CODEC_IMPLEMENTATION_NAME = ('Codec implementation name', 'codec name') -CORES = ('# CPU cores used', 'CPU cores used') -DENOISING = ('Denoising', 'denoising') -RESILIENCE = ('Resilience', 'resilience') -ERROR_CONCEALMENT = ('Error concealment', 'error concealment') -QP = ('Avg QP', 'QP avg') -CPU_USAGE = ('CPU usage %', 'CPU usage (%)') -PSNR = ('Avg PSNR', 'PSNR (dB)') -SSIM = ('Avg SSIM', 'SSIM') -ENC_BITRATE = ('Encoded bitrate', 'encoded bitrate (kbps)') -NUM_FRAMES = ('# input frames', 'num frames') -NUM_DROPPED_FRAMES = ('# dropped frames', 'num dropped frames') -TIME_TO_TARGET = ('Time to reach target bitrate', +WIDTH = ('width', 'width') +HEIGHT = ('height', 'height') +FILENAME = ('filename', 'clip') +CODEC_TYPE = ('codec_type', 'Codec') +ENCODER_IMPLEMENTATION_NAME = ('enc_impl_name', 'enc name') +DECODER_IMPLEMENTATION_NAME = ('dec_impl_name', 'dec name') +CODEC_IMPLEMENTATION_NAME = ('codec_impl_name', 'codec name') +CORES = ('num_cores', 'CPU cores used') +DENOISING = ('denoising', 'denoising') +RESILIENCE = ('resilience', 'resilience') +ERROR_CONCEALMENT = ('error_concealment', 'error concealment') +CPU_USAGE = ('cpu_usage_percent', 'CPU usage (%)') +BITRATE = ('target_bitrate_kbps', 'target bitrate (kbps)') +FRAMERATE = ('input_framerate_fps', 'fps') +QP = ('avg_qp', 'QP avg') +PSNR = ('avg_psnr', 'PSNR (dB)') +SSIM = ('avg_ssim', 'SSIM') +ENC_BITRATE = ('bitrate_kbps', 'encoded bitrate (kbps)') +NUM_FRAMES = ('num_input_frames', 'num frames') +NUM_DROPPED_FRAMES = ('num_dropped_frames', 'num dropped frames') +TIME_TO_TARGET = ('time_to_reach_target_bitrate_sec', 'time to reach target rate (sec)') -ENCODE_TIME = ('Frame encoding time', 'encode time (us)') -DECODE_TIME = ('Frame decoding time', 'decode time 
(us)') -AVG_KEY_FRAME_SIZE = ('Avg key frame size', 'avg key frame size (bytes)') -AVG_DELTA_FRAME_SIZE = ('Avg delta frame size', 'avg delta frame size (bytes)') +ENCODE_SPEED_FPS = ('enc_speed_fps', 'encode speed (fps)') +DECODE_SPEED_FPS = ('dec_speed_fps', 'decode speed (fps)') +AVG_KEY_FRAME_SIZE = ('avg_key_frame_size_bytes', 'avg key frame size (bytes)') +AVG_DELTA_FRAME_SIZE = ('avg_delta_frame_size_bytes', + 'avg delta frame size (bytes)') # Settings. SETTINGS = [ @@ -83,8 +84,8 @@ RESULTS = [ ENC_BITRATE, NUM_DROPPED_FRAMES, TIME_TO_TARGET, - ENCODE_TIME, - DECODE_TIME, + ENCODE_SPEED_FPS, + DECODE_SPEED_FPS, QP, CPU_USAGE, AVG_KEY_FRAME_SIZE, diff --git a/modules/video_coding/codecs/test/stats.cc b/modules/video_coding/codecs/test/stats.cc index 009f74a580..ca6f2149cd 100644 --- a/modules/video_coding/codecs/test/stats.cc +++ b/modules/video_coding/codecs/test/stats.cc @@ -9,51 +9,378 @@ */ #include "modules/video_coding/codecs/test/stats.h" + +#include <algorithm> +#include <cmath> +#include <numeric> + +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "rtc_base/checks.h" +#include "test/statistics.h" namespace webrtc { namespace test { -std::string FrameStatistic::ToString() const { +namespace { +const int kMaxBitrateMismatchPercent = 20; +} + +std::string FrameStatistics::ToString() const { std::stringstream ss; - ss << "frame " << frame_number; - ss << " " << decoded_width << "x" << decoded_height; - ss << " sl " << simulcast_svc_idx; - ss << " tl " << temporal_layer_idx; - ss << " type " << frame_type; - ss << " length " << encoded_frame_size_bytes; + ss << "frame_number " << frame_number; + ss << " decoded_width " << decoded_width; + ss << " decoded_height " << decoded_height; + ss << " simulcast_svc_idx " << simulcast_svc_idx; + ss << " temporal_layer_idx " << temporal_layer_idx; + ss << " frame_type " << frame_type; + ss << " encoded_frame_size_bytes " << encoded_frame_size_bytes; ss << " qp " << qp; ss << " psnr " << psnr; ss << " ssim " << ssim; - ss << " enc_time_us " << encode_time_us; - ss << " dec_time_us " << decode_time_us; - ss << " rtp_ts " << rtp_timestamp; - ss << " bitrate_kbps " << target_bitrate_kbps; + ss << " encode_time_us " << encode_time_us; + ss << " decode_time_us " << decode_time_us; + ss << " rtp_timestamp " << rtp_timestamp; + ss << " target_bitrate_kbps " << target_bitrate_kbps; return ss.str(); } -FrameStatistic* Stats::AddFrame(size_t timestamp) { - RTC_DCHECK(rtp_timestamp_to_frame_num_.find(timestamp) == - rtp_timestamp_to_frame_num_.end()); - const size_t frame_number = stats_.size(); - rtp_timestamp_to_frame_num_[timestamp] = frame_number; - stats_.emplace_back(frame_number, timestamp); - return &stats_.back(); +std::string VideoStatistics::ToString(std::string prefix) const { + std::stringstream ss; + ss << "\n" << prefix << "target_bitrate_kbps: " << target_bitrate_kbps; + ss << "\n" << prefix << "input_framerate_fps: " << input_framerate_fps; + ss << "\n" << prefix << "spatial_layer_idx: " << spatial_layer_idx; + ss << "\n" << prefix << "temporal_layer_idx: " << temporal_layer_idx; + ss << "\n" << prefix << "width: " << width; + ss << "\n" << prefix << "height: " << height; + ss << "\n" << prefix << "length_bytes: " << length_bytes; + ss << "\n" << prefix << "bitrate_kbps: " << bitrate_kbps; + ss << "\n" << prefix << "framerate_fps: " << framerate_fps; + ss << "\n" << prefix << "enc_speed_fps: " << enc_speed_fps; + ss << "\n" << prefix << "dec_speed_fps: " << dec_speed_fps; + ss << "\n" << prefix << "avg_delay_sec: " << avg_delay_sec; + ss << "\n"
<< prefix << "max_key_frame_delay_sec: " << max_key_frame_delay_sec; + ss << "\n" + << prefix << "max_delta_frame_delay_sec: " << max_delta_frame_delay_sec; + ss << "\n" + << prefix << "time_to_reach_target_bitrate_sec: " + << time_to_reach_target_bitrate_sec; + ss << "\n" + << prefix << "avg_key_frame_size_bytes: " << avg_key_frame_size_bytes; + ss << "\n" + << prefix << "avg_delta_frame_size_bytes: " << avg_delta_frame_size_bytes; + ss << "\n" << prefix << "avg_qp: " << avg_qp; + ss << "\n" << prefix << "avg_psnr: " << avg_psnr; + ss << "\n" << prefix << "min_psnr: " << min_psnr; + ss << "\n" << prefix << "avg_ssim: " << avg_ssim; + ss << "\n" << prefix << "min_ssim: " << min_ssim; + ss << "\n" << prefix << "num_input_frames: " << num_input_frames; + ss << "\n" << prefix << "num_encoded_frames: " << num_encoded_frames; + ss << "\n" << prefix << "num_decoded_frames: " << num_decoded_frames; + ss << "\n" + << prefix + << "num_dropped_frames: " << num_input_frames - num_encoded_frames; + ss << "\n" << prefix << "num_key_frames: " << num_key_frames; + ss << "\n" << prefix << "num_spatial_resizes: " << num_spatial_resizes; + ss << "\n" << prefix << "max_nalu_size_bytes: " << max_nalu_size_bytes; + return ss.str(); } -FrameStatistic* Stats::GetFrame(size_t frame_number) { - RTC_CHECK_LT(frame_number, stats_.size()); - return &stats_[frame_number]; +FrameStatistics* Stats::AddFrame(size_t timestamp, size_t layer_idx) { + RTC_DCHECK(rtp_timestamp_to_frame_num_[layer_idx].find(timestamp) == + rtp_timestamp_to_frame_num_[layer_idx].end()); + const size_t frame_num = layer_idx_to_stats_[layer_idx].size(); + rtp_timestamp_to_frame_num_[layer_idx][timestamp] = frame_num; + layer_idx_to_stats_[layer_idx].emplace_back(frame_num, timestamp); + return &layer_idx_to_stats_[layer_idx].back(); } -FrameStatistic* Stats::GetFrameWithTimestamp(size_t timestamp) { - RTC_DCHECK(rtp_timestamp_to_frame_num_.find(timestamp) != - rtp_timestamp_to_frame_num_.end()); - return GetFrame(rtp_timestamp_to_frame_num_[timestamp]); +FrameStatistics* Stats::GetFrame(size_t frame_num, size_t layer_idx) { + RTC_CHECK_LT(frame_num, layer_idx_to_stats_[layer_idx].size()); + return &layer_idx_to_stats_[layer_idx][frame_num]; } -size_t Stats::size() const { - return stats_.size(); +FrameStatistics* Stats::GetFrameWithTimestamp(size_t timestamp, + size_t layer_idx) { + RTC_DCHECK(rtp_timestamp_to_frame_num_[layer_idx].find(timestamp) != + rtp_timestamp_to_frame_num_[layer_idx].end()); + + return GetFrame(rtp_timestamp_to_frame_num_[layer_idx][timestamp], layer_idx); +} + +std::vector<VideoStatistics> Stats::SliceAndCalcLayerVideoStatistic( + size_t first_frame_num, + size_t last_frame_num) { + std::vector<VideoStatistics> layer_stats; + + size_t num_spatial_layers = 0; + size_t num_temporal_layers = 0; + GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers, + &num_temporal_layers); + RTC_CHECK_GT(num_spatial_layers, 0); + RTC_CHECK_GT(num_temporal_layers, 0); + + for (size_t spatial_layer_idx = 0; spatial_layer_idx < num_spatial_layers; + ++spatial_layer_idx) { + for (size_t temporal_layer_idx = 0; + temporal_layer_idx < num_temporal_layers; ++temporal_layer_idx) { + VideoStatistics layer_stat = SliceAndCalcVideoStatistic( + first_frame_num, last_frame_num, spatial_layer_idx, + temporal_layer_idx, false); + layer_stats.push_back(layer_stat); + } + } + + return layer_stats; +} + +VideoStatistics Stats::SliceAndCalcAggregatedVideoStatistic( + size_t first_frame_num, + size_t last_frame_num) { + size_t num_spatial_layers = 0; + size_t
num_temporal_layers = 0; + GetNumberOfEncodedLayers(first_frame_num, last_frame_num, &num_spatial_layers, + &num_temporal_layers); + RTC_CHECK_GT(num_spatial_layers, 0); + RTC_CHECK_GT(num_temporal_layers, 0); + + return SliceAndCalcVideoStatistic(first_frame_num, last_frame_num, + num_spatial_layers - 1, + num_temporal_layers - 1, true); +} + +size_t Stats::Size(size_t spatial_layer_idx) { + return layer_idx_to_stats_[spatial_layer_idx].size(); +} + +void Stats::Clear() { + layer_idx_to_stats_.clear(); + rtp_timestamp_to_frame_num_.clear(); +} + +FrameStatistics Stats::AggregateFrameStatistic( + size_t frame_num, + size_t spatial_layer_idx, + bool aggregate_independent_layers) { + FrameStatistics frame_stat = *GetFrame(frame_num, spatial_layer_idx); + bool inter_layer_predicted = frame_stat.inter_layer_predicted; + while (spatial_layer_idx-- > 0) { + if (aggregate_independent_layers || inter_layer_predicted) { + FrameStatistics* base_frame_stat = GetFrame(frame_num, spatial_layer_idx); + frame_stat.encoded_frame_size_bytes += + base_frame_stat->encoded_frame_size_bytes; + frame_stat.target_bitrate_kbps += base_frame_stat->target_bitrate_kbps; + + inter_layer_predicted = base_frame_stat->inter_layer_predicted; + } + } + + return frame_stat; +} + +size_t Stats::CalcLayerTargetBitrateKbps(size_t first_frame_num, + size_t last_frame_num, + size_t spatial_layer_idx, + size_t temporal_layer_idx, + bool aggregate_independent_layers) { + std::vector<size_t> target_bitrate_kbps(temporal_layer_idx + 1, 0); + + // We don't know if a superframe includes all required spatial layers because + // of possible frame drops. Run through all frames in the requested range, + // track the maximum target bitrate per temporal layer, and return the sum of + // these maximums. Assume the target bitrate in the frame statistic is + // specified per temporal layer.
+ for (size_t frame_num = first_frame_num; frame_num <= last_frame_num; + ++frame_num) { + FrameStatistics superframe = AggregateFrameStatistic( + frame_num, spatial_layer_idx, aggregate_independent_layers); + + if (superframe.temporal_layer_idx <= temporal_layer_idx) { + target_bitrate_kbps[superframe.temporal_layer_idx] = + std::max(target_bitrate_kbps[superframe.temporal_layer_idx], + superframe.target_bitrate_kbps); + } + } + + return std::accumulate(target_bitrate_kbps.begin(), target_bitrate_kbps.end(), + std::size_t {0}); +} + +VideoStatistics Stats::SliceAndCalcVideoStatistic( + size_t first_frame_num, + size_t last_frame_num, + size_t spatial_layer_idx, + size_t temporal_layer_idx, + bool aggregate_independent_layers) { + VideoStatistics video_stat; + + float buffer_level_bits = 0.0f; + Statistics buffer_level_sec; + + Statistics key_frame_size_bytes; + Statistics delta_frame_size_bytes; + + Statistics frame_encoding_time_us; + Statistics frame_decoding_time_us; + + Statistics psnr; + Statistics ssim; + Statistics qp; + + size_t rtp_timestamp_first_frame = 0; + size_t rtp_timestamp_prev_frame = 0; + + FrameStatistics last_successfully_decoded_frame(0, 0); + + const size_t target_bitrate_kbps = CalcLayerTargetBitrateKbps( + first_frame_num, last_frame_num, spatial_layer_idx, temporal_layer_idx, + aggregate_independent_layers); + + for (size_t frame_num = first_frame_num; frame_num <= last_frame_num; + ++frame_num) { + FrameStatistics frame_stat = AggregateFrameStatistic( + frame_num, spatial_layer_idx, aggregate_independent_layers); + + float time_since_first_frame_sec = + 1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_first_frame) / + kVideoPayloadTypeFrequency; + float time_since_prev_frame_sec = + 1.0f * (frame_stat.rtp_timestamp - rtp_timestamp_prev_frame) / + kVideoPayloadTypeFrequency; + + if (frame_stat.temporal_layer_idx > temporal_layer_idx) { + continue; + } + + buffer_level_bits -= time_since_prev_frame_sec * 1000 * target_bitrate_kbps; + buffer_level_bits = std::max(0.0f, buffer_level_bits); + buffer_level_bits += 8.0 * frame_stat.encoded_frame_size_bytes; + buffer_level_sec.AddSample(buffer_level_bits / + (1000 * target_bitrate_kbps)); + + video_stat.length_bytes += frame_stat.encoded_frame_size_bytes; + + if (frame_stat.encoding_successful) { + ++video_stat.num_encoded_frames; + + if (frame_stat.frame_type == kVideoFrameKey) { + key_frame_size_bytes.AddSample(frame_stat.encoded_frame_size_bytes); + ++video_stat.num_key_frames; + } else { + delta_frame_size_bytes.AddSample(frame_stat.encoded_frame_size_bytes); + } + + frame_encoding_time_us.AddSample(frame_stat.encode_time_us); + qp.AddSample(frame_stat.qp); + + video_stat.max_nalu_size_bytes = std::max(video_stat.max_nalu_size_bytes, + frame_stat.max_nalu_size_bytes); + } + + if (frame_stat.decoding_successful) { + ++video_stat.num_decoded_frames; + + video_stat.width = frame_stat.decoded_width; + video_stat.height = frame_stat.decoded_height; + + psnr.AddSample(frame_stat.psnr); + ssim.AddSample(frame_stat.ssim); + + if (video_stat.num_decoded_frames > 1) { + if (last_successfully_decoded_frame.decoded_width != + frame_stat.decoded_width || + last_successfully_decoded_frame.decoded_height != + frame_stat.decoded_height) { + ++video_stat.num_spatial_resizes; + } + } + + frame_decoding_time_us.AddSample(frame_stat.decode_time_us); + last_successfully_decoded_frame = frame_stat; + } + + if (video_stat.num_input_frames > 0) { + if (video_stat.time_to_reach_target_bitrate_sec == 0.0f) { + const float curr_kbps = + 8.0 * 
video_stat.length_bytes / 1000 / time_since_first_frame_sec; + const float bitrate_mismatch_percent = + 100 * std::fabs(curr_kbps - target_bitrate_kbps) / + target_bitrate_kbps; + if (bitrate_mismatch_percent < kMaxBitrateMismatchPercent) { + video_stat.time_to_reach_target_bitrate_sec = + time_since_first_frame_sec; + } + } + } + + rtp_timestamp_prev_frame = frame_stat.rtp_timestamp; + if (video_stat.num_input_frames == 0) { + rtp_timestamp_first_frame = frame_stat.rtp_timestamp; + } + + ++video_stat.num_input_frames; + } + + const size_t num_frames = last_frame_num - first_frame_num + 1; + const size_t timestamp_delta = + GetFrame(first_frame_num + 1, spatial_layer_idx)->rtp_timestamp - + GetFrame(first_frame_num, spatial_layer_idx)->rtp_timestamp; + const float input_framerate_fps = + 1.0 * kVideoPayloadTypeFrequency / timestamp_delta; + const float duration_sec = num_frames / input_framerate_fps; + + video_stat.target_bitrate_kbps = target_bitrate_kbps; + video_stat.input_framerate_fps = input_framerate_fps; + + video_stat.spatial_layer_idx = spatial_layer_idx; + video_stat.temporal_layer_idx = temporal_layer_idx; + + video_stat.bitrate_kbps = + static_cast(8 * video_stat.length_bytes / 1000 / duration_sec); + video_stat.framerate_fps = video_stat.num_encoded_frames / duration_sec; + + video_stat.enc_speed_fps = 1000000 / frame_encoding_time_us.Mean(); + video_stat.dec_speed_fps = 1000000 / frame_decoding_time_us.Mean(); + + video_stat.avg_delay_sec = buffer_level_sec.Mean(); + video_stat.max_key_frame_delay_sec = + 8 * key_frame_size_bytes.Max() / 1000 / target_bitrate_kbps; + video_stat.max_delta_frame_delay_sec = + 8 * delta_frame_size_bytes.Max() / 1000 / target_bitrate_kbps; + + video_stat.avg_key_frame_size_bytes = key_frame_size_bytes.Mean(); + video_stat.avg_delta_frame_size_bytes = delta_frame_size_bytes.Mean(); + video_stat.avg_qp = qp.Mean(); + + video_stat.avg_psnr = psnr.Mean(); + video_stat.min_psnr = psnr.Min(); + video_stat.avg_ssim = ssim.Mean(); + video_stat.min_ssim = ssim.Min(); + + return video_stat; +} + +void Stats::GetNumberOfEncodedLayers(size_t first_frame_num, + size_t last_frame_num, + size_t* num_encoded_spatial_layers, + size_t* num_encoded_temporal_layers) { + *num_encoded_spatial_layers = 0; + *num_encoded_temporal_layers = 0; + + const size_t num_spatial_layers = layer_idx_to_stats_.size(); + + for (size_t frame_num = first_frame_num; frame_num <= last_frame_num; + ++frame_num) { + for (size_t spatial_layer_idx = 0; spatial_layer_idx < num_spatial_layers; + ++spatial_layer_idx) { + FrameStatistics* frame_stat = GetFrame(frame_num, spatial_layer_idx); + if (frame_stat->encoding_successful) { + *num_encoded_spatial_layers = std::max( + *num_encoded_spatial_layers, frame_stat->simulcast_svc_idx + 1); + *num_encoded_temporal_layers = std::max( + *num_encoded_temporal_layers, frame_stat->temporal_layer_idx + 1); + } + } + } } } // namespace test diff --git a/modules/video_coding/codecs/test/stats.h b/modules/video_coding/codecs/test/stats.h index 2133a7a796..2330526448 100644 --- a/modules/video_coding/codecs/test/stats.h +++ b/modules/video_coding/codecs/test/stats.h @@ -21,8 +21,8 @@ namespace webrtc { namespace test { // Statistics for one processed frame. 
-struct FrameStatistic { - FrameStatistic(size_t frame_number, size_t rtp_timestamp) +struct FrameStatistics { + FrameStatistics(size_t frame_number, size_t rtp_timestamp) : frame_number(frame_number), rtp_timestamp(rtp_timestamp) {} std::string ToString() const; @@ -42,6 +42,7 @@ struct FrameStatistic { // Layering. size_t temporal_layer_idx = 0; size_t simulcast_svc_idx = 0; + bool inter_layer_predicted = false; // H264 specific. size_t max_nalu_size_bytes = 0; @@ -62,24 +63,96 @@ struct FrameStatistic { float ssim = 0.0; }; +struct VideoStatistics { + std::string ToString(std::string prefix) const; + + size_t target_bitrate_kbps = 0; + float input_framerate_fps = 0.0f; + + size_t spatial_layer_idx = 0; + size_t temporal_layer_idx = 0; + + size_t width = 0; + size_t height = 0; + + size_t length_bytes = 0; + size_t bitrate_kbps = 0; + float framerate_fps = 0; + + float enc_speed_fps = 0.0f; + float dec_speed_fps = 0.0f; + + float avg_delay_sec = 0.0f; + float max_key_frame_delay_sec = 0.0f; + float max_delta_frame_delay_sec = 0.0f; + float time_to_reach_target_bitrate_sec = 0.0f; + + float avg_key_frame_size_bytes = 0.0f; + float avg_delta_frame_size_bytes = 0.0f; + float avg_qp = 0.0f; + + float avg_psnr = 0.0f; + float min_psnr = 0.0f; + float avg_ssim = 0.0f; + float min_ssim = 0.0f; + + size_t num_input_frames = 0; + size_t num_encoded_frames = 0; + size_t num_decoded_frames = 0; + size_t num_key_frames = 0; + size_t num_spatial_resizes = 0; + size_t max_nalu_size_bytes = 0; +}; + // Statistics for a sequence of processed frames. This class is not thread safe. class Stats { public: Stats() = default; ~Stats() = default; - // Creates a FrameStatistic for the next frame to be processed. - FrameStatistic* AddFrame(size_t timestamp); + // Creates a FrameStatistics for the next frame to be processed. + FrameStatistics* AddFrame(size_t timestamp, size_t spatial_layer_idx); - // Returns the FrameStatistic corresponding to |frame_number| or |timestamp|. - FrameStatistic* GetFrame(size_t frame_number); - FrameStatistic* GetFrameWithTimestamp(size_t timestamp); + // Returns the FrameStatistics corresponding to |frame_number| or |timestamp|. 
+ FrameStatistics* GetFrame(size_t frame_number, size_t spatial_layer_idx); + FrameStatistics* GetFrameWithTimestamp(size_t timestamp, + size_t spatial_layer_idx); - size_t size() const; + std::vector<VideoStatistics> SliceAndCalcLayerVideoStatistic( + size_t first_frame_num, + size_t last_frame_num); + + VideoStatistics SliceAndCalcAggregatedVideoStatistic(size_t first_frame_num, + size_t last_frame_num); + + size_t Size(size_t spatial_layer_idx); + + void Clear(); private: - std::vector<FrameStatistic> stats_; - std::map<size_t, size_t> rtp_timestamp_to_frame_num_; + FrameStatistics AggregateFrameStatistic(size_t frame_num, + size_t spatial_layer_idx, + bool aggregate_independent_layers); + + size_t CalcLayerTargetBitrateKbps(size_t first_frame_num, + size_t last_frame_num, + size_t spatial_layer_idx, + size_t temporal_layer_idx, + bool aggregate_independent_layers); + + VideoStatistics SliceAndCalcVideoStatistic(size_t first_frame_num, + size_t last_frame_num, + size_t spatial_layer_idx, + size_t temporal_layer_idx, + bool aggregate_independent_layers); + + void GetNumberOfEncodedLayers(size_t first_frame_num, + size_t last_frame_num, + size_t* num_encoded_spatial_layers, + size_t* num_encoded_temporal_layers); + + std::map<size_t, std::vector<FrameStatistics>> layer_idx_to_stats_; + std::map<size_t, std::map<size_t, size_t>> rtp_timestamp_to_frame_num_; }; } // namespace test diff --git a/modules/video_coding/codecs/test/stats_unittest.cc b/modules/video_coding/codecs/test/stats_unittest.cc index 2e54a1e872..3ef46a6a28 100644 --- a/modules/video_coding/codecs/test/stats_unittest.cc +++ b/modules/video_coding/codecs/test/stats_unittest.cc @@ -20,16 +20,16 @@ const size_t kTimestamp = 12345; TEST(StatsTest, AddFrame) { Stats stats; - FrameStatistic* frame_stat = stats.AddFrame(kTimestamp); + FrameStatistics* frame_stat = stats.AddFrame(kTimestamp, 0); EXPECT_EQ(0ull, frame_stat->frame_number); EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp); - EXPECT_EQ(1u, stats.size()); + EXPECT_EQ(1u, stats.Size(0)); } TEST(StatsTest, GetFrame) { Stats stats; - stats.AddFrame(kTimestamp); - FrameStatistic* frame_stat = stats.GetFrame(0u); + stats.AddFrame(kTimestamp, 0); + FrameStatistics* frame_stat = stats.GetFrame(0u, 0); EXPECT_EQ(0u, frame_stat->frame_number); EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp); } @@ -38,17 +38,28 @@ TEST(StatsTest, AddFrames) { Stats stats; const size_t kNumFrames = 1000; for (size_t i = 0; i < kNumFrames; ++i) { - FrameStatistic* frame_stat = stats.AddFrame(kTimestamp + i); + FrameStatistics* frame_stat = stats.AddFrame(kTimestamp + i, 0); EXPECT_EQ(i, frame_stat->frame_number); EXPECT_EQ(kTimestamp + i, frame_stat->rtp_timestamp); } - EXPECT_EQ(kNumFrames, stats.size()); + EXPECT_EQ(kNumFrames, stats.Size(0)); // Get frame.
size_t i = 22; - FrameStatistic* frame_stat = stats.GetFrameWithTimestamp(kTimestamp + i); + FrameStatistics* frame_stat = stats.GetFrameWithTimestamp(kTimestamp + i, 0); EXPECT_EQ(i, frame_stat->frame_number); EXPECT_EQ(kTimestamp + i, frame_stat->rtp_timestamp); } +TEST(StatsTest, AddFrameLayering) { + Stats stats; + for (size_t i = 0; i < 3; ++i) { + stats.AddFrame(kTimestamp + i, i); + FrameStatistics* frame_stat = stats.GetFrame(0u, i); + EXPECT_EQ(0u, frame_stat->frame_number); + EXPECT_EQ(kTimestamp, frame_stat->rtp_timestamp - i); + EXPECT_EQ(1u, stats.Size(i)); + } +} + } // namespace test } // namespace webrtc diff --git a/modules/video_coding/codecs/test/test_config.cc b/modules/video_coding/codecs/test/test_config.cc index e85f9300e2..7d2582cf14 100644 --- a/modules/video_coding/codecs/test/test_config.cc +++ b/modules/video_coding/codecs/test/test_config.cc @@ -31,35 +31,33 @@ std::string CodecSpecificToString(const webrtc::VideoCodec& codec) { std::stringstream ss; switch (codec.codecType) { case kVideoCodecVP8: - ss << "\n Complexity : " << codec.VP8().complexity; - ss << "\n Resilience : " << codec.VP8().resilience; - ss << "\n # temporal layers : " + ss << "\ncomplexity: " << codec.VP8().complexity; + ss << "\nresilience: " << codec.VP8().resilience; + ss << "\nnum_temporal_layers: " << static_cast(codec.VP8().numberOfTemporalLayers); - ss << "\n Denoising : " << codec.VP8().denoisingOn; - ss << "\n Automatic resize : " << codec.VP8().automaticResizeOn; - ss << "\n Frame dropping : " << codec.VP8().frameDroppingOn; - ss << "\n Key frame interval : " << codec.VP8().keyFrameInterval; + ss << "\ndenoising: " << codec.VP8().denoisingOn; + ss << "\nautomatic_resize: " << codec.VP8().automaticResizeOn; + ss << "\nframe_dropping: " << codec.VP8().frameDroppingOn; + ss << "\nkey_frame_interval: " << codec.VP8().keyFrameInterval; break; case kVideoCodecVP9: - ss << "\n Complexity : " << codec.VP9().complexity; - ss << "\n Resilience : " << codec.VP9().resilienceOn; - ss << "\n # temporal layers : " + ss << "\ncomplexity: " << codec.VP9().complexity; + ss << "\nresilience: " << codec.VP9().resilienceOn; + ss << "\nnum_temporal_layers: " << static_cast(codec.VP9().numberOfTemporalLayers); - ss << "\n # spatial layers : " + ss << "\nnum_spatial_layers: " << static_cast(codec.VP9().numberOfSpatialLayers); - ss << "\n Denoising : " << codec.VP9().denoisingOn; - ss << "\n Frame dropping : " << codec.VP9().frameDroppingOn; - ss << "\n Key frame interval : " << codec.VP9().keyFrameInterval; - ss << "\n Adaptive QP mode : " << codec.VP9().adaptiveQpMode; - ss << "\n Automatic resize : " << codec.VP9().automaticResizeOn; - ss << "\n # spatial layers : " - << static_cast(codec.VP9().numberOfSpatialLayers); - ss << "\n Flexible mode : " << codec.VP9().flexibleMode; + ss << "\ndenoising: " << codec.VP9().denoisingOn; + ss << "\nframe_dropping: " << codec.VP9().frameDroppingOn; + ss << "\nkey_frame_interval: " << codec.VP9().keyFrameInterval; + ss << "\nadaptive_qp_mode: " << codec.VP9().adaptiveQpMode; + ss << "\nautomatic_resize: " << codec.VP9().automaticResizeOn; + ss << "\nflexible_mode: " << codec.VP9().flexibleMode; break; case kVideoCodecH264: - ss << "\n Frame dropping : " << codec.H264().frameDroppingOn; - ss << "\n Key frame interval : " << codec.H264().keyFrameInterval; - ss << "\n Profile : " << codec.H264().profile; + ss << "\nframe_dropping: " << codec.H264().frameDroppingOn; + ss << "\nkey_frame_interval: " << codec.H264().keyFrameInterval; + ss << "\nprofile: " << 
codec.H264().profile; break; default: break; @@ -197,20 +195,20 @@ std::vector TestConfig::FrameTypeForFrame(size_t frame_idx) const { std::string TestConfig::ToString() const { std::string codec_type = CodecTypeToPayloadString(codec_settings.codecType); std::stringstream ss; - ss << "\n Filename : " << filename; - ss << "\n # CPU cores used : " << NumberOfCores(); - ss << "\n General:"; - ss << "\n Codec type : " << codec_type; - ss << "\n Start bitrate : " << codec_settings.startBitrate << " kbps"; - ss << "\n Max bitrate : " << codec_settings.maxBitrate << " kbps"; - ss << "\n Min bitrate : " << codec_settings.minBitrate << " kbps"; - ss << "\n Width : " << codec_settings.width; - ss << "\n Height : " << codec_settings.height; - ss << "\n Max frame rate : " << codec_settings.maxFramerate; - ss << "\n QPmax : " << codec_settings.qpMax; - ss << "\n # simulcast streams : " + ss << "\nfilename: " << filename; + ss << "\nwidth: " << codec_settings.width; + ss << "\nheight: " << codec_settings.height; + ss << "\nnum_frames: " << num_frames; + ss << "\nnum_cores: " << NumberOfCores(); + ss << "\ncodec_type: " << codec_type; + ss << "\nmax_framerate_fps: " << codec_settings.maxFramerate; + ss << "\nstart_bitrate_kbps: " << codec_settings.startBitrate; + ss << "\nmax_bitrate_kbps: " << codec_settings.maxBitrate; + ss << "\nmin_bitrate_kbps: " << codec_settings.minBitrate; + ss << "\nmax_qp: " << codec_settings.qpMax; + ss << "\nnum_simulcast_streams : " << static_cast(codec_settings.numberOfSimulcastStreams); - ss << "\n " << codec_type << " specific: "; + ss << "\n" << codec_type << " specific: "; ss << CodecSpecificToString(codec_settings); return ss.str(); } diff --git a/modules/video_coding/codecs/test/test_config.h b/modules/video_coding/codecs/test/test_config.h index 65ce408fc7..d6aba19d4f 100644 --- a/modules/video_coding/codecs/test/test_config.h +++ b/modules/video_coding/codecs/test/test_config.h @@ -59,7 +59,7 @@ struct TestConfig { std::string filename; // File to process. This must be a video file in the YUV format. - std::string input_filename; + std::string filepath; // Number of frames to process. 
size_t num_frames = 0; diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc index 27653db55b..e10aa5f7d2 100644 --- a/modules/video_coding/codecs/test/videoprocessor.cc +++ b/modules/video_coding/codecs/test/videoprocessor.cc @@ -100,7 +100,7 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder, VideoDecoderList* decoders, FrameReader* input_frame_reader, const TestConfig& config, - std::vector<Stats>* stats, + Stats* stats, IvfFileWriterList* encoded_frame_writers, FrameWriterList* decoded_frame_writers) : config_(config), @@ -190,7 +190,7 @@ void VideoProcessor::ProcessFrame() { for (size_t simulcast_svc_idx = 0; simulcast_svc_idx < num_simulcast_or_spatial_layers_; ++simulcast_svc_idx) { - stats_->at(simulcast_svc_idx).AddFrame(rtp_timestamp); + stats_->AddFrame(rtp_timestamp, simulcast_svc_idx); } // For the highest measurement accuracy of the encode time, the start/stop @@ -199,8 +199,8 @@ void VideoProcessor::ProcessFrame() { for (size_t simulcast_svc_idx = 0; simulcast_svc_idx < num_simulcast_or_spatial_layers_; ++simulcast_svc_idx) { - FrameStatistic* frame_stat = - stats_->at(simulcast_svc_idx).GetFrame(frame_number); + FrameStatistics* frame_stat = + stats_->GetFrame(frame_number, simulcast_svc_idx); frame_stat->encode_start_ns = encode_start_ns; } @@ -210,8 +210,8 @@ void VideoProcessor::ProcessFrame() { for (size_t simulcast_svc_idx = 0; simulcast_svc_idx < num_simulcast_or_spatial_layers_; ++simulcast_svc_idx) { - FrameStatistic* frame_stat = - stats_->at(simulcast_svc_idx).GetFrame(frame_number); + FrameStatistics* frame_stat = + stats_->GetFrame(frame_number, simulcast_svc_idx); frame_stat->encode_return_code = encode_return_code; } @@ -224,8 +224,8 @@ void VideoProcessor::ProcessFrame() { last_encoded_frames_.end()) { EncodedImage& encoded_image = last_encoded_frames_[simulcast_svc_idx]; - FrameStatistic* frame_stat = - stats_->at(simulcast_svc_idx).GetFrame(frame_number); + FrameStatistics* frame_stat = + stats_->GetFrame(frame_number, simulcast_svc_idx); if (encoded_frame_writers_) { RTC_CHECK(encoded_frame_writers_->at(simulcast_svc_idx) @@ -300,9 +300,8 @@ void VideoProcessor::FrameEncoded( encoded_image._encodedWidth * encoded_image._encodedHeight; frame_wxh_to_simulcast_svc_idx_[frame_wxh] = simulcast_svc_idx; - FrameStatistic* frame_stat = - stats_->at(simulcast_svc_idx) - .GetFrameWithTimestamp(encoded_image._timeStamp); + FrameStatistics* frame_stat = stats_->GetFrameWithTimestamp( + encoded_image._timeStamp, simulcast_svc_idx); const size_t frame_number = frame_stat->frame_number; // Reordering is unexpected. Frames of different layers have the same value @@ -324,13 +323,21 @@ void VideoProcessor::FrameEncoded( frame_stat->encode_time_us = GetElapsedTimeMicroseconds(frame_stat->encode_start_ns, encode_stop_ns); - // TODO(ssilkin): Implement bitrate allocation for VP9 SVC. For now set - // target for base layers equal to total target to avoid devision by zero - // at analysis. - frame_stat->target_bitrate_kbps = - bitrate_allocation_.GetSpatialLayerSum( - codec == kVideoCodecVP9 ? 0 : simulcast_svc_idx) / - 1000; + if (codec == kVideoCodecVP9) { + const CodecSpecificInfoVP9& vp9_info = codec_specific.codecSpecific.VP9; + frame_stat->inter_layer_predicted = vp9_info.inter_layer_predicted; + + // TODO(ssilkin): Implement bitrate allocation for VP9 SVC. For now set + // target for base layers equal to total target to avoid division by zero + // at analysis.
+ frame_stat->target_bitrate_kbps = bitrate_allocation_.get_sum_kbps(); + } else { + frame_stat->target_bitrate_kbps = + (bitrate_allocation_.GetBitrate(simulcast_svc_idx, temporal_idx) + + 500) / + 1000; + } + frame_stat->encoded_frame_size_bytes = encoded_image._length; frame_stat->frame_type = encoded_image._frameType; frame_stat->temporal_layer_idx = temporal_idx; @@ -365,9 +372,8 @@ void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame) { const size_t simulcast_svc_idx = frame_wxh_to_simulcast_svc_idx_[decoded_frame.size()]; - FrameStatistic* frame_stat = - stats_->at(simulcast_svc_idx) - .GetFrameWithTimestamp(decoded_frame.timestamp()); + FrameStatistics* frame_stat = stats_->GetFrameWithTimestamp( + decoded_frame.timestamp(), simulcast_svc_idx); const size_t frame_number = frame_stat->frame_number; // Reordering is unexpected. Frames of different layers have the same value @@ -379,9 +385,8 @@ void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame) { // a freeze at playback. for (size_t num_dropped_frames = 0; num_dropped_frames < frame_number; ++num_dropped_frames) { - const FrameStatistic* prev_frame_stat = - stats_->at(simulcast_svc_idx) - .GetFrame(frame_number - num_dropped_frames - 1); + const FrameStatistics* prev_frame_stat = stats_->GetFrame( + frame_number - num_dropped_frames - 1, simulcast_svc_idx); if (prev_frame_stat->decoding_successful) { break; } @@ -465,7 +470,7 @@ void VideoProcessor::CopyEncodedImage(const EncodedImage& encoded_image, void VideoProcessor::CalculateFrameQuality(const VideoFrame& ref_frame, const VideoFrame& dec_frame, - FrameStatistic* frame_stat) { + FrameStatistics* frame_stat) { if (ref_frame.width() == dec_frame.width() || ref_frame.height() == dec_frame.height()) { frame_stat->psnr = I420PSNR(&ref_frame, &dec_frame); @@ -487,7 +492,7 @@ void VideoProcessor::CalculateFrameQuality(const VideoFrame& ref_frame, scaled_buffer->MutableDataU(), scaled_buffer->StrideU(), scaled_buffer->MutableDataV(), scaled_buffer->StrideV(), scaled_buffer->width(), scaled_buffer->height(), - libyuv::kFilterBilinear); + libyuv::kFilterBox); frame_stat->psnr = I420PSNR(*scaled_buffer, *dec_frame.video_frame_buffer()->ToI420()); frame_stat->ssim = diff --git a/modules/video_coding/codecs/test/videoprocessor.h b/modules/video_coding/codecs/test/videoprocessor.h index cddd80855f..8ea2ca597c 100644 --- a/modules/video_coding/codecs/test/videoprocessor.h +++ b/modules/video_coding/codecs/test/videoprocessor.h @@ -56,7 +56,7 @@ class VideoProcessor { VideoDecoderList* decoders, FrameReader* input_frame_reader, const TestConfig& config, - std::vector* stats, + Stats* stats, IvfFileWriterList* encoded_frame_writers, FrameWriterList* decoded_frame_writers); ~VideoProcessor(); @@ -179,7 +179,7 @@ class VideoProcessor { void CalculateFrameQuality(const VideoFrame& ref_frame, const VideoFrame& dec_frame, - FrameStatistic* frame_stat); + FrameStatistics* frame_stat); void WriteDecodedFrameToFile(rtc::Buffer* buffer, size_t simulcast_svc_idx); @@ -237,7 +237,7 @@ class VideoProcessor { RTC_GUARDED_BY(sequence_checker_); // Statistics. 
- std::vector* const stats_; + Stats* const stats_; rtc::SequencedTaskChecker sequence_checker_; diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc index 757a057162..d5411a12b5 100644 --- a/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc +++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc @@ -35,7 +35,6 @@ #include "rtc_base/file.h" #include "rtc_base/ptr_util.h" #include "system_wrappers/include/sleep.h" -#include "test/statistics.h" #include "test/testsupport/fileutils.h" #include "test/testsupport/metrics/video_metrics.h" @@ -44,10 +43,6 @@ namespace test { namespace { -const int kRtpClockRateHz = 90000; - -const int kMaxBitrateMismatchPercent = 20; - bool RunEncodeInRealTime(const TestConfig& config) { if (config.measure_cpu) { return true; @@ -142,7 +137,8 @@ class VideoProcessorIntegrationTest::CpuProcessTime final { } void Print() const { if (config_.measure_cpu) { - printf("CPU usage %%: %f\n", GetUsagePercent() / config_.NumberOfCores()); + printf("cpu_usage_percent: %f\n", + GetUsagePercent() / config_.NumberOfCores()); printf("\n"); } } @@ -245,123 +241,82 @@ void VideoProcessorIntegrationTest::AnalyzeAllFrames( const std::vector* rc_thresholds, const std::vector* quality_thresholds, const BitstreamThresholds* bs_thresholds) { - const bool is_svc = config_.NumberOfSpatialLayers() > 1; - const size_t number_of_simulcast_or_spatial_layers = - std::max(std::size_t{1}, - std::max(config_.NumberOfSpatialLayers(), - static_cast( - config_.codec_settings.numberOfSimulcastStreams))); - const size_t number_of_temporal_layers = config_.NumberOfTemporalLayers(); - printf("Rate control statistics\n==\n"); - for (size_t rate_update_index = 0; rate_update_index < rate_profiles.size(); - ++rate_update_index) { - const size_t first_frame_number = - (rate_update_index == 0) + for (size_t rate_update_idx = 0; rate_update_idx < rate_profiles.size(); + ++rate_update_idx) { + const size_t first_frame_num = + (rate_update_idx == 0) ? 0 - : rate_profiles[rate_update_index - 1].frame_index_rate_update; - const size_t last_frame_number = - rate_profiles[rate_update_index].frame_index_rate_update - 1; - RTC_CHECK(last_frame_number >= first_frame_number); - const size_t number_of_frames = last_frame_number - first_frame_number + 1; - const float input_duration_sec = - 1.0 * number_of_frames / rate_profiles[rate_update_index].input_fps; + : rate_profiles[rate_update_idx - 1].frame_index_rate_update; + const size_t last_frame_num = + rate_profiles[rate_update_idx].frame_index_rate_update - 1; + RTC_CHECK(last_frame_num >= first_frame_num); - std::vector overall_stats = - ExtractLayerStats(number_of_simulcast_or_spatial_layers - 1, - number_of_temporal_layers - 1, first_frame_number, - last_frame_number, true); + std::vector layer_stats = + stats_.SliceAndCalcLayerVideoStatistic(first_frame_num, last_frame_num); + for (const auto& layer_stat : layer_stats) { + printf("%s\n", layer_stat.ToString("recv_").c_str()); + } - printf("Rate update #%zu:\n", rate_update_index); + VideoStatistics send_stat = stats_.SliceAndCalcAggregatedVideoStatistic( + first_frame_num, last_frame_num); + printf("%s\n", send_stat.ToString("send_").c_str()); const RateControlThresholds* rc_threshold = - rc_thresholds ? &(*rc_thresholds)[rate_update_index] : nullptr; + rc_thresholds ? &(*rc_thresholds)[rate_update_idx] : nullptr; const QualityThresholds* quality_threshold = - quality_thresholds ? 
&(*quality_thresholds)[rate_update_index] - : nullptr; - AnalyzeAndPrintStats( - overall_stats, rate_profiles[rate_update_index].target_kbps, - rate_profiles[rate_update_index].input_fps, input_duration_sec, - rc_threshold, quality_threshold, bs_thresholds); + quality_thresholds ? &(*quality_thresholds)[rate_update_idx] : nullptr; - if (config_.print_frame_level_stats) { - PrintFrameLevelStats(overall_stats); - } - - for (size_t spatial_layer_number = 0; - spatial_layer_number < number_of_simulcast_or_spatial_layers; - ++spatial_layer_number) { - for (size_t temporal_layer_number = 0; - temporal_layer_number < number_of_temporal_layers; - ++temporal_layer_number) { - std::vector layer_stats = - ExtractLayerStats(spatial_layer_number, temporal_layer_number, - first_frame_number, last_frame_number, is_svc); - - const size_t target_bitrate_kbps = layer_stats[0].target_bitrate_kbps; - const float target_framerate_fps = - 1.0 * rate_profiles[rate_update_index].input_fps / - (1 << (number_of_temporal_layers - temporal_layer_number - 1)); - - printf("Spatial %zu temporal %zu:\n", spatial_layer_number, - temporal_layer_number); - AnalyzeAndPrintStats(layer_stats, target_bitrate_kbps, - target_framerate_fps, input_duration_sec, nullptr, - nullptr, nullptr); - - if (config_.print_frame_level_stats) { - PrintFrameLevelStats(layer_stats); - } - } - } + VerifyVideoStatistic(send_stat, rc_threshold, quality_threshold, + bs_thresholds, + rate_profiles[rate_update_idx].target_kbps, + rate_profiles[rate_update_idx].input_fps); } cpu_process_time_->Print(); } -std::vector VideoProcessorIntegrationTest::ExtractLayerStats( - size_t target_spatial_layer_number, - size_t target_temporal_layer_number, - size_t first_frame_number, - size_t last_frame_number, - bool combine_layers_stats) { - size_t target_bitrate_kbps = 0; - std::vector layer_stats; - - for (size_t frame_number = first_frame_number; - frame_number <= last_frame_number; ++frame_number) { - FrameStatistic superframe_stat = - *stats_.at(target_spatial_layer_number).GetFrame(frame_number); - const size_t tl_idx = superframe_stat.temporal_layer_idx; - if (tl_idx <= target_temporal_layer_number) { - if (combine_layers_stats) { - for (size_t spatial_layer_number = 0; - spatial_layer_number < target_spatial_layer_number; - ++spatial_layer_number) { - const FrameStatistic* frame_stat = - stats_.at(spatial_layer_number).GetFrame(frame_number); - superframe_stat.encoded_frame_size_bytes += - frame_stat->encoded_frame_size_bytes; - superframe_stat.encode_time_us = std::max( - superframe_stat.encode_time_us, frame_stat->encode_time_us); - superframe_stat.decode_time_us = std::max( - superframe_stat.decode_time_us, frame_stat->decode_time_us); - } - } - - // Target bitrate of extracted interval is bitrate of the highest - // spatial and temporal layer. 
- target_bitrate_kbps = - std::max(target_bitrate_kbps, superframe_stat.target_bitrate_kbps); - - layer_stats.push_back(superframe_stat); - } +void VideoProcessorIntegrationTest::VerifyVideoStatistic( + const VideoStatistics& video_stat, + const RateControlThresholds* rc_thresholds, + const QualityThresholds* quality_thresholds, + const BitstreamThresholds* bs_thresholds, + size_t target_bitrate_kbps, + float input_framerate_fps) { + if (rc_thresholds) { + const float bitrate_mismatch_percent = + 100 * std::fabs(1.0f * video_stat.bitrate_kbps - target_bitrate_kbps) / + target_bitrate_kbps; + const float framerate_mismatch_percent = + 100 * std::fabs(video_stat.framerate_fps - input_framerate_fps) / + input_framerate_fps; + EXPECT_LE(bitrate_mismatch_percent, + rc_thresholds->max_avg_bitrate_mismatch_percent); + EXPECT_LE(video_stat.time_to_reach_target_bitrate_sec, + rc_thresholds->max_time_to_reach_target_bitrate_sec); + EXPECT_LE(framerate_mismatch_percent, + rc_thresholds->max_avg_framerate_mismatch_percent); + EXPECT_LE(video_stat.avg_delay_sec, + rc_thresholds->max_avg_buffer_level_sec); + EXPECT_LE(video_stat.max_key_frame_delay_sec, + rc_thresholds->max_max_key_frame_delay_sec); + EXPECT_LE(video_stat.max_delta_frame_delay_sec, + rc_thresholds->max_max_delta_frame_delay_sec); + EXPECT_LE(video_stat.num_spatial_resizes, + rc_thresholds->max_num_spatial_resizes); + EXPECT_LE(video_stat.num_key_frames, rc_thresholds->max_num_key_frames); } - for (auto& frame_stat : layer_stats) { - frame_stat.target_bitrate_kbps = target_bitrate_kbps; + if (quality_thresholds) { + EXPECT_GT(video_stat.avg_psnr, quality_thresholds->min_avg_psnr); + EXPECT_GT(video_stat.min_psnr, quality_thresholds->min_min_psnr); + EXPECT_GT(video_stat.avg_ssim, quality_thresholds->min_avg_ssim); + EXPECT_GT(video_stat.min_ssim, quality_thresholds->min_min_ssim); } - return layer_stats; + if (bs_thresholds) { + EXPECT_LE(video_stat.max_nalu_size_bytes, + bs_thresholds->max_max_nalu_size_bytes); + } } void VideoProcessorIntegrationTest::CreateEncoderAndDecoder() { @@ -443,9 +398,9 @@ void VideoProcessorIntegrationTest::SetUpAndInitObjects( config_.codec_settings.maxFramerate = initial_framerate_fps; // Create file objects for quality analysis. 
- source_frame_reader_.reset(new YuvFrameReaderImpl( - config_.input_filename, config_.codec_settings.width, - config_.codec_settings.height)); + source_frame_reader_.reset( + new YuvFrameReaderImpl(config_.filepath, config_.codec_settings.width, + config_.codec_settings.height)); EXPECT_TRUE(source_frame_reader_->Init()); const size_t num_simulcast_or_spatial_layers = std::max( @@ -477,7 +432,7 @@ void VideoProcessorIntegrationTest::SetUpAndInitObjects( } } - stats_.resize(num_simulcast_or_spatial_layers); + stats_.Clear(); cpu_process_time_.reset(new CpuProcessTime(config_)); @@ -519,8 +474,6 @@ void VideoProcessorIntegrationTest::ReleaseAndCloseObjects( void VideoProcessorIntegrationTest::PrintSettings( rtc::TaskQueue* task_queue) const { printf("VideoProcessor settings\n==\n"); - printf(" Total # of frames : %d", - source_frame_reader_->NumberOfFrames()); printf("%s\n", config_.ToString().c_str()); printf("VideoProcessorIntegrationTest settings\n==\n"); @@ -533,205 +486,14 @@ void VideoProcessorIntegrationTest::PrintSettings( sync_event.Set(); }); sync_event.Wait(rtc::Event::kForever); - printf(" Encoder implementation name: %s\n", encoder_name.c_str()); - printf(" Decoder implementation name: %s\n", decoder_name.c_str()); + printf("enc_impl_name: %s\n", encoder_name.c_str()); + printf("dec_impl_name: %s\n", decoder_name.c_str()); if (encoder_name == decoder_name) { - printf(" Codec implementation name : %s_%s\n", config_.CodecName().c_str(), + printf("codec_impl_name: %s_%s\n", config_.CodecName().c_str(), encoder_name.c_str()); } printf("\n"); } -void VideoProcessorIntegrationTest::AnalyzeAndPrintStats( - const std::vector& stats, - const float target_bitrate_kbps, - const float target_framerate_fps, - const float input_duration_sec, - const RateControlThresholds* rc_thresholds, - const QualityThresholds* quality_thresholds, - const BitstreamThresholds* bs_thresholds) { - const size_t num_input_frames = stats.size(); - size_t num_dropped_frames = 0; - size_t num_decoded_frames = 0; - size_t num_spatial_resizes = 0; - size_t num_key_frames = 0; - size_t max_nalu_size_bytes = 0; - - size_t encoded_bytes = 0; - float buffer_level_kbits = 0.0; - float time_to_reach_target_bitrate_sec = -1.0; - - Statistics buffer_level_sec; - Statistics key_frame_size_bytes; - Statistics delta_frame_size_bytes; - - Statistics encoding_time_us; - Statistics decoding_time_us; - Statistics psnr; - Statistics ssim; - - Statistics qp; - - FrameStatistic last_successfully_decoded_frame(0, 0); - for (size_t frame_idx = 0; frame_idx < stats.size(); ++frame_idx) { - const FrameStatistic& frame_stat = stats[frame_idx]; - - const float time_since_first_input_sec = - frame_idx == 0 - ? 0.0 - : 1.0 * (frame_stat.rtp_timestamp - stats[0].rtp_timestamp) / - kRtpClockRateHz; - const float time_since_last_input_sec = - frame_idx == 0 ? 0.0 - : 1.0 * - (frame_stat.rtp_timestamp - - stats[frame_idx - 1].rtp_timestamp) / - kRtpClockRateHz; - - // Testing framework uses constant input framerate. This guarantees even - // sampling, which is important, of buffer level. 
- buffer_level_kbits -= time_since_last_input_sec * target_bitrate_kbps; - buffer_level_kbits = std::max(0.0f, buffer_level_kbits); - buffer_level_kbits += 8.0 * frame_stat.encoded_frame_size_bytes / 1000; - buffer_level_sec.AddSample(buffer_level_kbits / target_bitrate_kbps); - - encoded_bytes += frame_stat.encoded_frame_size_bytes; - if (frame_stat.encoded_frame_size_bytes == 0) { - ++num_dropped_frames; - } else { - if (frame_stat.frame_type == kVideoFrameKey) { - key_frame_size_bytes.AddSample(frame_stat.encoded_frame_size_bytes); - ++num_key_frames; - } else { - delta_frame_size_bytes.AddSample(frame_stat.encoded_frame_size_bytes); - } - - encoding_time_us.AddSample(frame_stat.encode_time_us); - qp.AddSample(frame_stat.qp); - - max_nalu_size_bytes = - std::max(max_nalu_size_bytes, frame_stat.max_nalu_size_bytes); - } - - if (frame_stat.decoding_successful) { - psnr.AddSample(frame_stat.psnr); - ssim.AddSample(frame_stat.ssim); - if (num_decoded_frames > 0) { - if (last_successfully_decoded_frame.decoded_width != - frame_stat.decoded_width || - last_successfully_decoded_frame.decoded_height != - frame_stat.decoded_height) { - ++num_spatial_resizes; - } - } - decoding_time_us.AddSample(frame_stat.decode_time_us); - last_successfully_decoded_frame = frame_stat; - ++num_decoded_frames; - } - - if (time_to_reach_target_bitrate_sec < 0 && frame_idx > 0) { - const float curr_bitrate_kbps = - (8.0 * encoded_bytes / 1000) / time_since_first_input_sec; - const float bitrate_mismatch_percent = - 100 * std::fabs(curr_bitrate_kbps - target_bitrate_kbps) / - target_bitrate_kbps; - if (bitrate_mismatch_percent < kMaxBitrateMismatchPercent) { - time_to_reach_target_bitrate_sec = time_since_first_input_sec; - } - } - } - - const float encoded_bitrate_kbps = - 8 * encoded_bytes / input_duration_sec / 1000; - const float bitrate_mismatch_percent = - 100 * std::fabs(encoded_bitrate_kbps - target_bitrate_kbps) / - target_bitrate_kbps; - const size_t num_encoded_frames = num_input_frames - num_dropped_frames; - const float encoded_framerate_fps = num_encoded_frames / input_duration_sec; - const float decoded_framerate_fps = num_decoded_frames / input_duration_sec; - const float framerate_mismatch_percent = - 100 * std::fabs(decoded_framerate_fps - target_framerate_fps) / - target_framerate_fps; - const float max_key_frame_delay_sec = - 8 * key_frame_size_bytes.Max() / 1000 / target_bitrate_kbps; - const float max_delta_frame_delay_sec = - 8 * delta_frame_size_bytes.Max() / 1000 / target_bitrate_kbps; - - printf("Frame width : %zu\n", - last_successfully_decoded_frame.decoded_width); - printf("Frame height : %zu\n", - last_successfully_decoded_frame.decoded_height); - printf("Target bitrate : %f kbps\n", target_bitrate_kbps); - printf("Encoded bitrate : %f kbps\n", encoded_bitrate_kbps); - printf("Bitrate mismatch : %f %%\n", bitrate_mismatch_percent); - printf("Time to reach target bitrate : %f sec\n", - time_to_reach_target_bitrate_sec); - printf("Target framerate : %f fps\n", target_framerate_fps); - printf("Encoded framerate : %f fps\n", encoded_framerate_fps); - printf("Decoded framerate : %f fps\n", decoded_framerate_fps); - printf("Frame encoding time : %f us\n", encoding_time_us.Mean()); - printf("Frame decoding time : %f us\n", decoding_time_us.Mean()); - printf("Encoding framerate : %f fps\n", - 1000000 / encoding_time_us.Mean()); - printf("Decoding framerate : %f fps\n", - 1000000 / decoding_time_us.Mean()); - printf("Framerate mismatch percent : %f %%\n", - framerate_mismatch_percent); - 
printf("Avg buffer level : %f sec\n", buffer_level_sec.Mean()); - printf("Max key frame delay : %f sec\n", max_key_frame_delay_sec); - printf("Max delta frame delay : %f sec\n", - max_delta_frame_delay_sec); - printf("Avg key frame size : %f bytes\n", - key_frame_size_bytes.Mean()); - printf("Avg delta frame size : %f bytes\n", - delta_frame_size_bytes.Mean()); - printf("Avg QP : %f\n", qp.Mean()); - printf("Avg PSNR : %f dB\n", psnr.Mean()); - printf("Min PSNR : %f dB\n", psnr.Min()); - printf("Avg SSIM : %f\n", ssim.Mean()); - printf("Min SSIM : %f\n", ssim.Min()); - printf("# input frames : %zu\n", num_input_frames); - printf("# encoded frames : %zu\n", num_encoded_frames); - printf("# decoded frames : %zu\n", num_decoded_frames); - printf("# dropped frames : %zu\n", num_dropped_frames); - printf("# key frames : %zu\n", num_key_frames); - printf("# encoded bytes : %zu\n", encoded_bytes); - printf("# spatial resizes : %zu\n", num_spatial_resizes); - - if (rc_thresholds) { - EXPECT_LE(bitrate_mismatch_percent, - rc_thresholds->max_avg_bitrate_mismatch_percent); - EXPECT_LE(time_to_reach_target_bitrate_sec, - rc_thresholds->max_time_to_reach_target_bitrate_sec); - EXPECT_LE(framerate_mismatch_percent, - rc_thresholds->max_avg_framerate_mismatch_percent); - EXPECT_LE(buffer_level_sec.Mean(), rc_thresholds->max_avg_buffer_level_sec); - EXPECT_LE(max_key_frame_delay_sec, - rc_thresholds->max_max_key_frame_delay_sec); - EXPECT_LE(max_delta_frame_delay_sec, - rc_thresholds->max_max_delta_frame_delay_sec); - EXPECT_LE(num_spatial_resizes, rc_thresholds->max_num_spatial_resizes); - EXPECT_LE(num_key_frames, rc_thresholds->max_num_key_frames); - } - - if (quality_thresholds) { - EXPECT_GT(psnr.Mean(), quality_thresholds->min_avg_psnr); - EXPECT_GT(psnr.Min(), quality_thresholds->min_min_psnr); - EXPECT_GT(ssim.Mean(), quality_thresholds->min_avg_ssim); - EXPECT_GT(ssim.Min(), quality_thresholds->min_min_ssim); - } - - if (bs_thresholds) { - EXPECT_LE(max_nalu_size_bytes, bs_thresholds->max_max_nalu_size_bytes); - } -} - -void VideoProcessorIntegrationTest::PrintFrameLevelStats( - const std::vector& stats) const { - for (const auto& frame_stat : stats) { - printf("%s\n", frame_stat.ToString().c_str()); - } -} - } // namespace test } // namespace webrtc diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest.h b/modules/video_coding/codecs/test/videoprocessor_integrationtest.h index 7052e05446..3c482f05e1 100644 --- a/modules/video_coding/codecs/test/videoprocessor_integrationtest.h +++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest.h @@ -92,6 +92,8 @@ class VideoProcessorIntegrationTest : public testing::Test { // Config. TestConfig config_; + Stats stats_; + // Can be used by all H.264 tests. 
const H264KeyframeChecker h264_keyframe_checker_; @@ -114,21 +116,12 @@ class VideoProcessorIntegrationTest : public testing::Test { const std::vector* quality_thresholds, const BitstreamThresholds* bs_thresholds); - std::vector ExtractLayerStats( - size_t target_spatial_layer_number, - size_t target_temporal_layer_number, - size_t first_frame_number, - size_t last_frame_number, - bool combine_layers); - - void AnalyzeAndPrintStats(const std::vector& stats, - float target_bitrate_kbps, - float target_framerate_fps, - float input_duration_sec, + void VerifyVideoStatistic(const VideoStatistics& video_stat, const RateControlThresholds* rc_thresholds, const QualityThresholds* quality_thresholds, - const BitstreamThresholds* bs_thresholds); - void PrintFrameLevelStats(const std::vector& stats) const; + const BitstreamThresholds* bs_thresholds, + size_t target_bitrate_kbps, + float input_framerate_fps); void PrintSettings(rtc::TaskQueue* task_queue) const; @@ -140,7 +133,6 @@ class VideoProcessorIntegrationTest : public testing::Test { std::unique_ptr source_frame_reader_; std::vector> encoded_frame_writers_; std::vector> decoded_frame_writers_; - std::vector stats_; std::unique_ptr processor_; std::unique_ptr cpu_process_time_; }; diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest_libvpx.cc b/modules/video_coding/codecs/test/videoprocessor_integrationtest_libvpx.cc index 50cac8e3d7..8f0d466a60 100644 --- a/modules/video_coding/codecs/test/videoprocessor_integrationtest_libvpx.cc +++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest_libvpx.cc @@ -28,6 +28,7 @@ const int kCifHeight = 288; const int kNumFramesShort = 100; #endif const int kNumFramesLong = 300; +const size_t kBitrateRdPerfKbps[] = {300, 600, 800, 1250, 1750, 2500}; } // namespace class VideoProcessorIntegrationTestLibvpx @@ -35,7 +36,7 @@ class VideoProcessorIntegrationTestLibvpx protected: VideoProcessorIntegrationTestLibvpx() { config_.filename = "foreman_cif"; - config_.input_filename = ResourcePath(config_.filename, "yuv"); + config_.filepath = ResourcePath(config_.filename, "yuv"); config_.num_frames = kNumFramesLong; // Only allow encoder/decoder to use single core, for predictability. config_.use_single_core = true; @@ -44,6 +45,22 @@ class VideoProcessorIntegrationTestLibvpx config_.encoded_frame_checker = &qp_frame_checker_; } + void PrintRdPerf(std::map> rd_stats) { + printf("\n%13s %7s %7s %13s %13s %7s %13s %13s\n", "uplink_kbps", "width", + "height", "downlink_kbps", "framerate_fps", "psnr", "enc_speed_fps", + "dec_speed_fps"); + for (const auto& rd_stat : rd_stats) { + const size_t bitrate_kbps = rd_stat.first; + for (const auto& layer_stat : rd_stat.second) { + printf("%13zu %7zu %7zu %13zu %13.2f %7.2f %13.2f %13.2f\n", + bitrate_kbps, layer_stat.width, layer_stat.height, + layer_stat.bitrate_kbps, layer_stat.framerate_fps, + layer_stat.avg_psnr, layer_stat.enc_speed_fps, + layer_stat.dec_speed_fps); + } + } + } + private: // Verify that the QP parser returns the same QP as the encoder does. 
   const class QpFrameChecker : public TestConfig::EncodedFrameChecker {
@@ -303,7 +320,7 @@ TEST_F(VideoProcessorIntegrationTestLibvpx, MAYBE_TemporalLayersVP8) {
 #endif
 TEST_F(VideoProcessorIntegrationTestLibvpx, MAYBE_SimulcastVP8) {
   config_.filename = "ConferenceMotion_1280_720_50";
-  config_.input_filename = ResourcePath(config_.filename, "yuv");
+  config_.filepath = ResourcePath(config_.filename, "yuv");
   config_.num_frames = 100;
   config_.SetCodecSettings(kVideoCodecVP8, 3, 1, 3, true, true, false,
                            kResilienceOn, 1280, 720);
@@ -326,7 +343,7 @@ TEST_F(VideoProcessorIntegrationTestLibvpx, MAYBE_SimulcastVP8) {
 #endif
 TEST_F(VideoProcessorIntegrationTestLibvpx, MAYBE_SvcVP9) {
   config_.filename = "ConferenceMotion_1280_720_50";
-  config_.input_filename = ResourcePath(config_.filename, "yuv");
+  config_.filepath = ResourcePath(config_.filename, "yuv");
   config_.num_frames = 100;
   config_.SetCodecSettings(kVideoCodecVP9, 1, 3, 3, true, true, false,
                            kResilienceOn, 1280, 720);
@@ -341,5 +358,49 @@ TEST_F(VideoProcessorIntegrationTestLibvpx, MAYBE_SvcVP9) {
                               &quality_thresholds, nullptr, nullptr);
 }
 
+TEST_F(VideoProcessorIntegrationTestLibvpx, DISABLED_SimulcastVP8RdPerf) {
+  config_.filename = "FourPeople_1280x720_30";
+  config_.filepath = ResourcePath(config_.filename, "yuv");
+  config_.num_frames = 300;
+  config_.SetCodecSettings(kVideoCodecVP8, 3, 1, 3, true, true, false,
+                           kResilienceOn, 1280, 720);
+
+  std::map<size_t, std::vector<VideoStatistics>> rd_stats;
+  for (size_t bitrate_kbps : kBitrateRdPerfKbps) {
+    std::vector<RateProfile> rate_profiles = {
+        {bitrate_kbps, 30, config_.num_frames}};
+
+    ProcessFramesAndMaybeVerify(rate_profiles, nullptr, nullptr, nullptr,
+                                nullptr);
+
+    rd_stats[bitrate_kbps] =
+        stats_.SliceAndCalcLayerVideoStatistic(0, config_.num_frames - 1);
+  }
+
+  PrintRdPerf(rd_stats);
+}
+
+TEST_F(VideoProcessorIntegrationTestLibvpx, DISABLED_SvcVP9RdPerf) {
+  config_.filename = "FourPeople_1280x720_30";
+  config_.filepath = ResourcePath(config_.filename, "yuv");
+  config_.num_frames = 300;
+  config_.SetCodecSettings(kVideoCodecVP9, 1, 3, 3, true, true, false,
+                           kResilienceOn, 1280, 720);
+
+  std::map<size_t, std::vector<VideoStatistics>> rd_stats;
+  for (size_t bitrate_kbps : kBitrateRdPerfKbps) {
+    std::vector<RateProfile> rate_profiles = {
+        {bitrate_kbps, 30, config_.num_frames}};
+
+    ProcessFramesAndMaybeVerify(rate_profiles, nullptr, nullptr, nullptr,
+                                nullptr);
+
+    rd_stats[bitrate_kbps] =
+        stats_.SliceAndCalcLayerVideoStatistic(0, config_.num_frames - 1);
+  }
+
+  PrintRdPerf(rd_stats);
+}
+
 }  // namespace test
 }  // namespace webrtc
diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest_mediacodec.cc b/modules/video_coding/codecs/test/videoprocessor_integrationtest_mediacodec.cc
index 3e42e9c10a..df0a7d2e4d 100644
--- a/modules/video_coding/codecs/test/videoprocessor_integrationtest_mediacodec.cc
+++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest_mediacodec.cc
@@ -28,7 +28,7 @@ class VideoProcessorIntegrationTestMediaCodec
  protected:
   VideoProcessorIntegrationTestMediaCodec() {
     config_.filename = "foreman_cif";
-    config_.input_filename = ResourcePath(config_.filename, "yuv");
+    config_.filepath = ResourcePath(config_.filename, "yuv");
     config_.num_frames = kForemanNumFrames;
     config_.hw_encoder = true;
     config_.hw_decoder = true;
diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest_openh264.cc b/modules/video_coding/codecs/test/videoprocessor_integrationtest_openh264.cc
index e77b2ac3f9..26c8389642 100644
--- a/modules/video_coding/codecs/test/videoprocessor_integrationtest_openh264.cc
+++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest_openh264.cc
@@ -32,7 +32,7 @@ class VideoProcessorIntegrationTestOpenH264
  protected:
   VideoProcessorIntegrationTestOpenH264() {
     config_.filename = "foreman_cif";
-    config_.input_filename = ResourcePath(config_.filename, "yuv");
+    config_.filepath = ResourcePath(config_.filename, "yuv");
     config_.num_frames = kNumFrames;
     // Only allow encoder/decoder to use single core, for predictability.
     config_.use_single_core = true;
diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest_parameterized.cc b/modules/video_coding/codecs/test/videoprocessor_integrationtest_parameterized.cc
index d755090c45..84be25c422 100644
--- a/modules/video_coding/codecs/test/videoprocessor_integrationtest_parameterized.cc
+++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest_parameterized.cc
@@ -59,7 +59,7 @@ class VideoProcessorIntegrationTestParameterized
                size_t framerate,
                const std::string& filename) {
     config_.filename = filename;
-    config_.input_filename = ResourcePath(filename, "yuv");
+    config_.filepath = ResourcePath(filename, "yuv");
     config_.use_single_core = kUseSingleCore;
     config_.measure_cpu = kMeasureCpu;
     config_.hw_encoder = hw_codec_;
@@ -114,5 +114,9 @@ TEST_P(VideoProcessorIntegrationTestParameterized, Process_352x288_30fps) {
   RunTest(352, 288, 30, "foreman_cif");
 }
 
+TEST_P(VideoProcessorIntegrationTestParameterized, Process_1280x720_30fps) {
+  RunTest(1280, 720, 30, "FourPeople_1280x720_30");
+}
+
 }  // namespace test
 }  // namespace webrtc
diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest_videotoolbox.cc b/modules/video_coding/codecs/test/videoprocessor_integrationtest_videotoolbox.cc
index 58e9233dad..2a861072b9 100644
--- a/modules/video_coding/codecs/test/videoprocessor_integrationtest_videotoolbox.cc
+++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest_videotoolbox.cc
@@ -27,7 +27,7 @@ class VideoProcessorIntegrationTestVideoToolbox
  protected:
   VideoProcessorIntegrationTestVideoToolbox() {
     config_.filename = "foreman_cif";
-    config_.input_filename = ResourcePath(config_.filename, "yuv");
+    config_.filepath = ResourcePath(config_.filename, "yuv");
    config_.num_frames = kForemanNumFrames;
     config_.hw_encoder = true;
     config_.hw_decoder = true;
diff --git a/modules/video_coding/codecs/test/videoprocessor_unittest.cc b/modules/video_coding/codecs/test/videoprocessor_unittest.cc
index 827ad90e3b..042f2029c5 100644
--- a/modules/video_coding/codecs/test/videoprocessor_unittest.cc
+++ b/modules/video_coding/codecs/test/videoprocessor_unittest.cc
@@ -54,8 +54,6 @@ class VideoProcessorTest : public testing::Test {
     config_.SetCodecSettings(kVideoCodecVP8, 1, 1, 1, false, false, false,
                              false, kWidth, kHeight);
 
-    stats_.resize(1);
-
     decoder_mock_ = new MockVideoDecoder();
     decoders_.push_back(std::unique_ptr<VideoDecoder>(decoder_mock_));
 
@@ -96,7 +94,7 @@ class VideoProcessorTest : public testing::Test {
   MockVideoDecoder* decoder_mock_;
   std::vector<std::unique_ptr<VideoDecoder>> decoders_;
   MockFrameReader frame_reader_mock_;
-  std::vector<Stats> stats_;
+  Stats stats_;
   std::unique_ptr<VideoProcessor> video_processor_;
 };
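Note on the two DISABLED RD-perf tests added in videoprocessor_integrationtest_libvpx.cc: both repeat the same sweep-and-slice pattern. Below is a minimal sketch of that pattern factored into a helper; RunRdPerfSweep is hypothetical and not part of this change, and it assumes only the members visible in this diff (config_, stats_, kBitrateRdPerfKbps, ProcessFramesAndMaybeVerify, Stats::SliceAndCalcLayerVideoStatistic, PrintRdPerf).

// Hypothetical helper, not part of this change: the bitrate sweep that
// DISABLED_SimulcastVP8RdPerf and DISABLED_SvcVP9RdPerf both run inline.
// Assumes it is a member of VideoProcessorIntegrationTestLibvpx so that
// config_, stats_, ProcessFramesAndMaybeVerify() and PrintRdPerf() are in
// scope, and that config_.SetCodecSettings() has already been called.
void RunRdPerfSweep() {
  std::map<size_t, std::vector<VideoStatistics>> rd_stats;
  for (size_t bitrate_kbps : kBitrateRdPerfKbps) {
    // One rate profile per bitrate point: constant target bitrate at 30 fps
    // over the whole clip.
    std::vector<RateProfile> rate_profiles = {
        {bitrate_kbps, 30, config_.num_frames}};

    // No RC/quality/bitstream thresholds: RD-perf runs only collect stats,
    // they do not gate on them.
    ProcessFramesAndMaybeVerify(rate_profiles, nullptr, nullptr, nullptr,
                                nullptr);

    // Aggregate the per-frame stats of this run into per-layer video
    // statistics for this bitrate point.
    rd_stats[bitrate_kbps] =
        stats_.SliceAndCalcLayerVideoStatistic(0, config_.num_frames - 1);
  }
  PrintRdPerf(rd_stats);
}

With such a helper, each RD-perf test body would reduce to setting config_ (clip, frame count, codec settings) and calling RunRdPerfSweep().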