From cf2df2fb977f822be2ef406f9c52a09c5ebdb561 Mon Sep 17 00:00:00 2001
From: Sebastian Jansson
Date: Tue, 2 Apr 2019 11:51:28 +0200
Subject: [PATCH] Bases scenario frame matching on similarity.

Refactors the quality measurement code, basing frame matching on frame
thumbnail similarity. This makes the code robust against variations in
timing and frame drops.

Bug: webrtc:9510
Change-Id: Ief7266e01f39ca621a529c0da736e5ed1df8560a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/124401
Commit-Queue: Sebastian Jansson
Reviewed-by: Rasmus Brandt
Cr-Commit-Position: refs/heads/master@{#27415}
---
 test/scenario/BUILD.gn                  |  19 +--
 test/scenario/quality_info.h            |  22 ++-
 test/scenario/quality_stats.cc          | 195 +++++++++++++++---------
 test/scenario/quality_stats.h           | 119 +++++++++++----
 test/scenario/quality_stats_unittest.cc |  41 +++--
 test/scenario/scenario.cc               |   7 +-
 test/scenario/scenario_config.h         |   7 +-
 test/scenario/scenario_unittest.cc      |  62 ++++++++
 test/scenario/video_stream.cc           |  36 ++---
 test/scenario/video_stream.h            |  14 +-
 test/scenario/video_stream_unittest.cc  |  36 +++--
 11 files changed, 360 insertions(+), 198 deletions(-)

diff --git a/test/scenario/BUILD.gn b/test/scenario/BUILD.gn
index 64ce904a02..e6f3f465f5 100644
--- a/test/scenario/BUILD.gn
+++ b/test/scenario/BUILD.gn
@@ -156,6 +156,7 @@ if (rtc_include_tests) {
   rtc_source_set("scenario_unittests") {
     testonly = true
     sources = [
+      "quality_stats_unittest.cc",
       "scenario_unittest.cc",
       "video_stream_unittest.cc",
     ]
@@ -176,22 +177,4 @@ if (rtc_include_tests) {
       deps += [ ":scenario_unittest_resources_bundle_data" ]
     }
   }
-  rtc_source_set("scenario_slow_tests") {
-    testonly = true
-    sources = [
-      "quality_stats_unittest.cc",
-    ]
-    deps = [
-      ":scenario",
-      "../../logging:mocks",
-      "../../rtc_base:checks",
-      "../../rtc_base:rtc_base_approved",
-      "../../system_wrappers",
-      "../../system_wrappers:field_trial",
-      "../../test:field_trial",
-      "../../test:test_support",
-      "//testing/gmock",
-      "//third_party/abseil-cpp/absl/memory",
-    ]
-  }
 }
diff --git a/test/scenario/quality_info.h b/test/scenario/quality_info.h
index 9f17af02a2..0cf6eafff6 100644
--- a/test/scenario/quality_info.h
+++ b/test/scenario/quality_info.h
@@ -11,16 +11,24 @@
 #define TEST_SCENARIO_QUALITY_INFO_H_
 
 #include "api/units/timestamp.h"
+#include "api/video/video_frame_buffer.h"
 
 namespace webrtc {
 namespace test {
-struct VideoFrameQualityInfo {
-  Timestamp capture_time;
-  Timestamp received_capture_time;
-  Timestamp render_time;
-  int width;
-  int height;
-  double psnr;
+struct VideoFramePair {
+  rtc::scoped_refptr<VideoFrameBuffer> captured;
+  rtc::scoped_refptr<VideoFrameBuffer> decoded;
+  Timestamp capture_time = Timestamp::MinusInfinity();
+  Timestamp render_time = Timestamp::PlusInfinity();
+  // A unique identifier for the spatial/temporal layer the decoded frame
+  // belongs to. Note that this does not reflect the id as defined by the
+  // underlying layer setup.
+  int layer_id = 0;
+  int capture_id = 0;
+  int decode_id = 0;
+  // Indicates the repeat count for the decoded frame, i.e., how many times the
+  // same decoded frame has matched different captured frames.
+  int repeated = 0;
 };
 }  // namespace test
 }  // namespace webrtc
diff --git a/test/scenario/quality_stats.cc b/test/scenario/quality_stats.cc
index 02399896ea..dbfd9ff36d 100644
--- a/test/scenario/quality_stats.cc
+++ b/test/scenario/quality_stats.cc
@@ -17,96 +17,103 @@
 namespace webrtc {
 namespace test {
+namespace {
+constexpr int kThumbWidth = 96;
+constexpr int kThumbHeight = 96;
+}  // namespace
 
-VideoQualityAnalyzer::VideoQualityAnalyzer(
-    std::unique_ptr<RtcEventLogOutput> writer,
-    std::function<void(const VideoFrameQualityInfo&)> frame_info_handler)
-    : writer_(std::move(writer)), task_queue_("VideoAnalyzer") {
-  if (writer_) {
-    PrintHeaders();
-    frame_info_handlers_.push_back(
-        [this](const VideoFrameQualityInfo& info) { PrintFrameInfo(info); });
-  }
-  if (frame_info_handler)
-    frame_info_handlers_.push_back(frame_info_handler);
-}
+VideoFrameMatcher::VideoFrameMatcher(
+    std::vector<std::function<void(const VideoFramePair&)>>
+        frame_pair_handlers)
+    : frame_pair_handlers_(frame_pair_handlers), task_queue_("VideoAnalyzer") {}
 
-VideoQualityAnalyzer::~VideoQualityAnalyzer() {
+VideoFrameMatcher::~VideoFrameMatcher() {
   task_queue_.SendTask([] {});
 }
 
-void VideoQualityAnalyzer::OnCapturedFrame(const VideoFrame& frame) {
-  VideoFrame copy = frame;
-  task_queue_.PostTask([this, copy]() mutable {
-    captured_frames_.push_back(std::move(copy));
+void VideoFrameMatcher::RegisterLayer(int layer_id) {
+  task_queue_.PostTask([this, layer_id] { layers_[layer_id] = VideoLayer(); });
+}
+
+void VideoFrameMatcher::OnCapturedFrame(const VideoFrame& frame,
+                                        Timestamp at_time) {
+  CapturedFrame captured;
+  captured.id = next_capture_id_++;
+  captured.capture_time = at_time;
+  captured.frame = frame.video_frame_buffer();
+  captured.thumb = ScaleVideoFrameBuffer(*frame.video_frame_buffer()->ToI420(),
+                                         kThumbWidth, kThumbHeight),
+  task_queue_.PostTask([this, captured]() {
+    for (auto& layer : layers_) {
+      CapturedFrame copy = captured;
+      if (layer.second.last_decode) {
+        copy.best_score = I420SSE(*captured.thumb->GetI420(),
+                                  *layer.second.last_decode->thumb->GetI420());
+        copy.best_decode = layer.second.last_decode;
+      }
+      layer.second.captured_frames.push_back(std::move(copy));
+    }
   });
 }
 
-void VideoQualityAnalyzer::OnDecodedFrame(const VideoFrame& frame) {
-  VideoFrame decoded = frame;
-  RTC_CHECK(frame.ntp_time_ms());
-  RTC_CHECK(frame.timestamp());
-  task_queue_.PostTask([this, decoded] {
-    // TODO(srte): Add detection and handling of lost frames.
-    RTC_CHECK(!captured_frames_.empty());
-    VideoFrame captured = std::move(captured_frames_.front());
-    captured_frames_.pop_front();
-    VideoFrameQualityInfo decoded_info =
-        VideoFrameQualityInfo{Timestamp::us(captured.timestamp_us()),
-                              Timestamp::ms(decoded.timestamp() / 90.0),
-                              Timestamp::ms(decoded.render_time_ms()),
-                              decoded.width(),
-                              decoded.height(),
-                              I420PSNR(&captured, &decoded)};
-    for (auto& handler : frame_info_handlers_)
-      handler(decoded_info);
+void VideoFrameMatcher::OnDecodedFrame(const VideoFrame& frame,
+                                       Timestamp render_time,
+                                       int layer_id) {
+  rtc::scoped_refptr<DecodedFrame> decoded(new DecodedFrame{});
+  decoded->render_time = render_time;
+  decoded->frame = frame.video_frame_buffer();
+  decoded->thumb = ScaleVideoFrameBuffer(*frame.video_frame_buffer()->ToI420(),
+                                         kThumbWidth, kThumbHeight);
+  decoded->render_time = render_time;
+
+  task_queue_.PostTask([this, decoded, layer_id] {
+    auto& layer = layers_[layer_id];
+    decoded->id = layer.next_decoded_id++;
+    layer.last_decode = decoded;
+    for (auto& captured : layer.captured_frames) {
+      double score =
+          I420SSE(*captured.thumb->GetI420(), *decoded->thumb->GetI420());
+      if (score < captured.best_score) {
+        captured.best_score = score;
+        captured.best_decode = decoded;
+        captured.matched = false;
+      } else {
+        captured.matched = true;
+      }
+    }
+    while (!layer.captured_frames.empty() &&
+           layer.captured_frames.front().matched) {
+      HandleMatch(layer.captured_frames.front(), layer_id);
+      layer.captured_frames.pop_front();
+    }
   });
 }
 
-bool VideoQualityAnalyzer::Active() const {
-  return !frame_info_handlers_.empty();
+bool VideoFrameMatcher::Active() const {
+  return !frame_pair_handlers_.empty();
 }
 
-void VideoQualityAnalyzer::PrintHeaders() {
-  writer_->Write("capt recv_capt render width height psnr\n");
-}
-
-void VideoQualityAnalyzer::PrintFrameInfo(const VideoFrameQualityInfo& sample) {
-  LogWriteFormat(writer_.get(), "%.3f %.3f %.3f %i %i %.3f\n",
-                 sample.capture_time.seconds(),
-                 sample.received_capture_time.seconds(),
-                 sample.render_time.seconds(), sample.width,
-                 sample.height, sample.psnr);
-}
-
-void VideoQualityStats::HandleFrameInfo(VideoFrameQualityInfo sample) {
-  total++;
-  if (sample.render_time.IsInfinite()) {
-    ++lost;
-  } else {
-    ++valid;
-    end_to_end_seconds.AddSample(
-        (sample.render_time - sample.capture_time).seconds());
-    psnr.AddSample(sample.psnr);
+void VideoFrameMatcher::Finalize() {
+  for (auto& layer : layers_) {
+    while (!layer.second.captured_frames.empty()) {
+      HandleMatch(layer.second.captured_frames.front(), layer.first);
+      layer.second.captured_frames.pop_front();
+    }
   }
 }
 
 ForwardingCapturedFrameTap::ForwardingCapturedFrameTap(
     Clock* clock,
-    VideoQualityAnalyzer* analyzer,
+    VideoFrameMatcher* matcher,
     rtc::VideoSourceInterface<VideoFrame>* source)
-    : clock_(clock), analyzer_(analyzer), source_(source) {}
+    : clock_(clock), matcher_(matcher), source_(source) {}
 
 ForwardingCapturedFrameTap::~ForwardingCapturedFrameTap() {}
 
 void ForwardingCapturedFrameTap::OnFrame(const VideoFrame& frame) {
   RTC_CHECK(sink_);
-  VideoFrame copy = frame;
-  if (frame.ntp_time_ms() == 0)
-    copy.set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
-  copy.set_timestamp(copy.ntp_time_ms() * 90);
-  analyzer_->OnCapturedFrame(copy);
-  sink_->OnFrame(copy);
+  matcher_->OnCapturedFrame(frame, Timestamp::ms(clock_->TimeInMilliseconds()));
+  sink_->OnFrame(frame);
 }
 
 void ForwardingCapturedFrameTap::OnDiscardedFrame() {
   RTC_CHECK(sink_);
@@ -126,11 +133,61 @@ void ForwardingCapturedFrameTap::RemoveSink(
   sink_ = nullptr;
 }
 
-DecodedFrameTap::DecodedFrameTap(VideoQualityAnalyzer* analyzer)
-    : analyzer_(analyzer) {}
+DecodedFrameTap::DecodedFrameTap(VideoFrameMatcher* matcher, int layer_id)
+    : matcher_(matcher), layer_id_(layer_id) {
+  matcher_->RegisterLayer(layer_id_);
+}
 
 void DecodedFrameTap::OnFrame(const VideoFrame& frame) {
-  analyzer_->OnDecodedFrame(frame);
+  matcher_->OnDecodedFrame(frame, Timestamp::ms(frame.render_time_ms()),
+                           layer_id_);
+}
+
+VideoQualityAnalyzer::VideoQualityAnalyzer(
+    VideoQualityAnalyzerConfig config,
+    std::unique_ptr<RtcEventLogOutput> writer)
+    : config_(config), writer_(std::move(writer)) {
+  if (writer_) {
+    PrintHeaders();
+  }
+}
+
+VideoQualityAnalyzer::~VideoQualityAnalyzer() = default;
+
+void VideoQualityAnalyzer::PrintHeaders() {
+  writer_->Write(
+      "capture_time render_time capture_width capture_height render_width "
+      "render_height psnr\n");
+}
+
+std::function<void(VideoFramePair)> VideoQualityAnalyzer::Handler() {
+  return [this](VideoFramePair pair) { HandleFramePair(pair); };
+}
+
+void VideoQualityAnalyzer::HandleFramePair(VideoFramePair sample) {
+  double psnr = NAN;
+  RTC_CHECK(sample.captured);
+  ++stats_.captures_count;
+  if (!sample.decoded) {
+    ++stats_.lost_count;
+  } else {
+    psnr = I420PSNR(*sample.captured->ToI420(), *sample.decoded->ToI420());
+    ++stats_.valid_count;
+    stats_.end_to_end_seconds.AddSample(
+        (sample.render_time - sample.capture_time).seconds());
+    stats_.psnr.AddSample(psnr);
+  }
+  if (writer_) {
+    LogWriteFormat(writer_.get(), "%.3f %.3f %.3f %i %i %i %i %.3f\n",
                   sample.capture_time.seconds(),
+                   sample.render_time.seconds(),
+                   sample.captured->width(), sample.captured->height(),
+                   sample.decoded->width(), sample.decoded->height(), psnr);
+  }
+}
+
+VideoQualityStats VideoQualityAnalyzer::stats() const {
+  return stats_;
 }
 
 }  // namespace test
diff --git a/test/scenario/quality_stats.h b/test/scenario/quality_stats.h
index 0a0ce12447..26dd6a32ac 100644
--- a/test/scenario/quality_stats.h
+++ b/test/scenario/quality_stats.h
@@ -11,7 +11,9 @@
 #define TEST_SCENARIO_QUALITY_STATS_H_
 
 #include <deque>
+#include <map>
 #include <memory>
+#include <string>
 #include <utility>
 #include <vector>
@@ -20,6 +22,7 @@
 #include "api/video/video_frame.h"
 #include "api/video/video_sink_interface.h"
 #include "api/video/video_source_interface.h"
+#include "rtc_base/ref_counted_object.h"
 #include "rtc_base/task_queue_for_test.h"
 #include "rtc_base/time_utils.h"
 #include "system_wrappers/include/clock.h"
@@ -31,46 +34,72 @@
 namespace webrtc {
 namespace test {
-class VideoQualityAnalyzer {
+class VideoFrameMatcher {
  public:
-  VideoQualityAnalyzer(
-      std::unique_ptr<RtcEventLogOutput> writer,
-      std::function<void(const VideoFrameQualityInfo&)> frame_info_handler);
-  ~VideoQualityAnalyzer();
-  void OnCapturedFrame(const VideoFrame& frame);
-  void OnDecodedFrame(const VideoFrame& frame);
-  void Synchronize();
+  explicit VideoFrameMatcher(
+      std::vector<std::function<void(const VideoFramePair&)>>
+          frame_pair_handlers);
+  ~VideoFrameMatcher();
+  void RegisterLayer(int layer_id);
+  void OnCapturedFrame(const VideoFrame& frame, Timestamp at_time);
+  void OnDecodedFrame(const VideoFrame& frame,
+                      Timestamp render_time,
+                      int layer_id);
   bool Active() const;
   Clock* clock();
 
  private:
-  void PrintHeaders();
-  void PrintFrameInfo(const VideoFrameQualityInfo& sample);
-  const std::unique_ptr<RtcEventLogOutput> writer_;
-  std::vector<std::function<void(const VideoFrameQualityInfo&)>>
-      frame_info_handlers_;
-  std::deque<VideoFrame> captured_frames_;
+  struct DecodedFrameBase {
+    int id;
+    Timestamp render_time = Timestamp::PlusInfinity();
+    rtc::scoped_refptr<VideoFrameBuffer> frame;
+    rtc::scoped_refptr<VideoFrameBuffer> thumb;
+    int repeat_count = 0;
+  };
+  using DecodedFrame = rtc::RefCountedObject<DecodedFrameBase>;
+  struct CapturedFrame {
+    int id;
+    Timestamp capture_time =
+        Timestamp::PlusInfinity();
+    rtc::scoped_refptr<VideoFrameBuffer> frame;
+    rtc::scoped_refptr<VideoFrameBuffer> thumb;
+    double best_score = INFINITY;
+    rtc::scoped_refptr<DecodedFrame> best_decode;
+    bool matched = false;
+  };
+  struct VideoLayer {
+    int layer_id;
+    std::deque<CapturedFrame> captured_frames;
+    rtc::scoped_refptr<DecodedFrame> last_decode;
+    int next_decoded_id = 1;
+  };
+  void HandleMatch(CapturedFrame& captured, int layer_id) {
+    VideoFramePair frame_pair;
+    frame_pair.layer_id = layer_id;
+    frame_pair.captured = captured.frame;
+    frame_pair.capture_id = captured.id;
+    if (captured.best_decode) {
+      frame_pair.decode_id = captured.best_decode->id;
+      frame_pair.capture_time = captured.capture_time;
+      frame_pair.decoded = captured.best_decode->frame;
+      frame_pair.render_time = captured.best_decode->render_time;
+      frame_pair.repeated = captured.best_decode->repeat_count++;
+    }
+    for (auto& handler : frame_pair_handlers_)
+      handler(frame_pair);
+  }
+  void Finalize();
+  int next_capture_id_ = 1;
+  std::vector<std::function<void(const VideoFramePair&)>> frame_pair_handlers_;
+  std::map<int, VideoLayer> layers_;
   TaskQueueForTest task_queue_;
 };
 
-struct VideoQualityStats {
-  int total = 0;
-  int valid = 0;
-  int lost = 0;
-  Statistics end_to_end_seconds;
-  Statistics frame_size;
-  Statistics psnr;
-  Statistics ssim;
-
-  void HandleFrameInfo(VideoFrameQualityInfo sample);
-};
-
 class ForwardingCapturedFrameTap
     : public rtc::VideoSinkInterface<VideoFrame>,
       public rtc::VideoSourceInterface<VideoFrame> {
  public:
   ForwardingCapturedFrameTap(Clock* clock,
-                             VideoQualityAnalyzer* analyzer,
+                             VideoFrameMatcher* matcher,
                              rtc::VideoSourceInterface<VideoFrame>* source);
   ForwardingCapturedFrameTap(ForwardingCapturedFrameTap&) = delete;
   ForwardingCapturedFrameTap& operator=(ForwardingCapturedFrameTap&) = delete;
@@ -88,7 +117,7 @@ class ForwardingCapturedFrameTap
 
  private:
   Clock* const clock_;
-  VideoQualityAnalyzer* const analyzer_;
+  VideoFrameMatcher* const matcher_;
   rtc::VideoSourceInterface<VideoFrame>* const source_;
   VideoSinkInterface<VideoFrame>* sink_;
   int discarded_count_ = 0;
@@ -96,12 +125,42 @@
 class DecodedFrameTap : public rtc::VideoSinkInterface<VideoFrame> {
  public:
-  explicit DecodedFrameTap(VideoQualityAnalyzer* analyzer);
+  explicit DecodedFrameTap(VideoFrameMatcher* matcher, int layer_id);
   // VideoSinkInterface interface
   void OnFrame(const VideoFrame& frame) override;
 
  private:
-  VideoQualityAnalyzer* const analyzer_;
+  VideoFrameMatcher* const matcher_;
+  int layer_id_;
+};
+
+struct VideoQualityAnalyzerConfig {
+  double psnr_coverage = 1;
+};
+
+struct VideoQualityStats {
+  int captures_count = 0;
+  int valid_count = 0;
+  int lost_count = 0;
+  Statistics end_to_end_seconds;
+  Statistics frame_size;
+  Statistics psnr;
+};
+
+class VideoQualityAnalyzer {
+ public:
+  explicit VideoQualityAnalyzer(
+      VideoQualityAnalyzerConfig config = VideoQualityAnalyzerConfig(),
+      std::unique_ptr<RtcEventLogOutput> writer = nullptr);
+  ~VideoQualityAnalyzer();
+  void HandleFramePair(VideoFramePair sample);
+  VideoQualityStats stats() const;
+  void PrintHeaders();
+  void PrintFrameInfo(const VideoFramePair& sample);
+  std::function<void(VideoFramePair)> Handler();
+
+ private:
+  const VideoQualityAnalyzerConfig config_;
+  VideoQualityStats stats_;
+  const std::unique_ptr<RtcEventLogOutput> writer_;
 };
 }  // namespace test
 }  // namespace webrtc
diff --git a/test/scenario/quality_stats_unittest.cc b/test/scenario/quality_stats_unittest.cc
index e3806cfedf..273723cd89 100644
--- a/test/scenario/quality_stats_unittest.cc
+++ b/test/scenario/quality_stats_unittest.cc
@@ -13,50 +13,49 @@
 namespace webrtc {
 namespace test {
 namespace {
-VideoStreamConfig AnalyzerVideoConfig(VideoQualityStats* stats) {
+void CreateAnalyzedStream(Scenario* s,
+                          NetworkNodeConfig network_config,
+                          VideoQualityAnalyzer* analyzer) {
   VideoStreamConfig config;
   config.encoder.codec = VideoStreamConfig::Encoder::Codec::kVideoCodecVP8;
   config.encoder.implementation =
       VideoStreamConfig::Encoder::Implementation::kSoftware;
-  config.analyzer.frame_quality_handler = [stats](VideoFrameQualityInfo info) {
-    stats->HandleFrameInfo(info);
-  };
-  return config;
+  config.hooks.frame_pair_handlers = {analyzer->Handler()};
+  auto route = s->CreateRoutes(s->CreateClient("caller", CallClientConfig()),
+                               {s->CreateSimulationNode(network_config)},
+                               s->CreateClient("callee", CallClientConfig()),
+                               {s->CreateSimulationNode(NetworkNodeConfig())});
+  s->CreateVideoStream(route->forward(), config);
 }
 }  // namespace
 
 TEST(ScenarioAnalyzerTest, PsnrIsHighWhenNetworkIsGood) {
-  VideoQualityStats stats;
+  VideoQualityAnalyzer analyzer;
   {
     Scenario s;
     NetworkNodeConfig good_network;
     good_network.simulation.bandwidth = DataRate::kbps(1000);
-    auto route = s.CreateRoutes(s.CreateClient("caller", CallClientConfig()),
-                                {s.CreateSimulationNode(good_network)},
-                                s.CreateClient("callee", CallClientConfig()),
-                                {s.CreateSimulationNode(NetworkNodeConfig())});
-    s.CreateVideoStream(route->forward(), AnalyzerVideoConfig(&stats));
+    CreateAnalyzedStream(&s, good_network, &analyzer);
     s.RunFor(TimeDelta::seconds(1));
   }
-  EXPECT_GT(stats.psnr.Mean(), 46);
+  // This is mainly a regression test; the target is based on previous runs
+  // and might change due to changes in configuration, encoder, etc.
+  EXPECT_GT(analyzer.stats().psnr.Mean(), 45);
 }
 
 TEST(ScenarioAnalyzerTest, PsnrIsLowWhenNetworkIsBad) {
-  VideoQualityStats stats;
+  VideoQualityAnalyzer analyzer;
   {
     Scenario s;
     NetworkNodeConfig bad_network;
     bad_network.simulation.bandwidth = DataRate::kbps(100);
     bad_network.simulation.loss_rate = 0.02;
-    auto route = s.CreateRoutes(s.CreateClient("caller", CallClientConfig()),
-                                {s.CreateSimulationNode(bad_network)},
-                                s.CreateClient("callee", CallClientConfig()),
-                                {s.CreateSimulationNode(NetworkNodeConfig())});
-
-    s.CreateVideoStream(route->forward(), AnalyzerVideoConfig(&stats));
-    s.RunFor(TimeDelta::seconds(2));
+    CreateAnalyzedStream(&s, bad_network, &analyzer);
+    s.RunFor(TimeDelta::seconds(1));
   }
-  EXPECT_LT(stats.psnr.Mean(), 40);
+  // This is mainly a regression test; the target is based on previous runs
+  // and might change due to changes in configuration, encoder, etc.
+  EXPECT_LT(analyzer.stats().psnr.Mean(), 43);
 }
 }  // namespace test
 }  // namespace webrtc
diff --git a/test/scenario/scenario.cc b/test/scenario/scenario.cc
index 6abc5d5210..54639de59e 100644
--- a/test/scenario/scenario.cc
+++ b/test/scenario/scenario.cc
@@ -270,11 +270,8 @@ VideoStreamPair* Scenario::CreateVideoStream(
 VideoStreamPair* Scenario::CreateVideoStream(
     std::pair<CallClient*, CallClient*> clients,
     VideoStreamConfig config) {
-  std::unique_ptr<RtcEventLogOutput> quality_logger;
-  if (config.analyzer.log_to_file)
-    quality_logger = clients.first->GetLogWriter(".video_quality.txt");
-  video_streams_.emplace_back(new VideoStreamPair(
-      clients.first, clients.second, config, std::move(quality_logger)));
+  video_streams_.emplace_back(
+      new VideoStreamPair(clients.first, clients.second, config));
   return video_streams_.back().get();
 }
 
diff --git a/test/scenario/scenario_config.h b/test/scenario/scenario_config.h
index c6ddb23105..bc0fd0150a 100644
--- a/test/scenario/scenario_config.h
+++ b/test/scenario/scenario_config.h
@@ -181,10 +181,9 @@ struct VideoStreamConfig {
     enum Type { kFake } type = kFake;
     std::string sync_group;
   } render;
-  struct analyzer {
-    bool log_to_file = false;
-    std::function<void(const VideoFrameQualityInfo&)> frame_quality_handler;
-  } analyzer;
+  struct Hooks {
+    std::vector<std::function<void(const VideoFramePair&)>> frame_pair_handlers;
+  } hooks;
 };
 
 struct AudioStreamConfig {
diff --git a/test/scenario/scenario_unittest.cc b/test/scenario/scenario_unittest.cc
index 4a4b7f638d..8d43007469 100644
--- a/test/scenario/scenario_unittest.cc
+++ b/test/scenario/scenario_unittest.cc
@@ -55,5 +55,67 @@ TEST(ScenarioTest, StartsAndStopsWithoutErrors) {
   EXPECT_TRUE(packet_received);
   EXPECT_TRUE(bitrate_changed);
 }
+namespace {
+void SetupVideoCall(Scenario& s, VideoQualityAnalyzer* analyzer) {
+  CallClientConfig call_config;
+  auto* alice = s.CreateClient("alice", call_config);
+  auto* bob = s.CreateClient("bob", call_config);
+  NetworkNodeConfig network_config;
+  network_config.simulation.bandwidth = DataRate::kbps(1000);
+  network_config.simulation.delay = TimeDelta::ms(50);
+  auto alice_net = s.CreateSimulationNode(network_config);
+  auto bob_net = s.CreateSimulationNode(network_config);
+  auto route = s.CreateRoutes(alice, {alice_net}, bob, {bob_net});
+  VideoStreamConfig video;
+  if (analyzer) {
+    video.source.capture = VideoStreamConfig::Source::Capture::kVideoFile;
+    video.source.video_file.name = "foreman_cif";
+    video.source.video_file.width = 352;
+    video.source.video_file.height = 288;
+    video.source.framerate = 30;
+    video.encoder.codec = VideoStreamConfig::Encoder::Codec::kVideoCodecVP8;
+    video.encoder.implementation =
+        VideoStreamConfig::Encoder::Implementation::kSoftware;
+    video.hooks.frame_pair_handlers = {analyzer->Handler()};
+  }
+  s.CreateVideoStream(route->forward(), video);
+  s.CreateAudioStream(route->forward(), AudioStreamConfig());
+}
+}  // namespace
+
+TEST(ScenarioTest, SimTimeEncoding) {
+  VideoQualityAnalyzerConfig analyzer_config;
+  analyzer_config.psnr_coverage = 0.1;
+  VideoQualityAnalyzer analyzer(analyzer_config);
+  {
+    Scenario s("scenario/encode_sim", false);
+    SetupVideoCall(s, &analyzer);
+    s.RunFor(TimeDelta::seconds(60));
+  }
+  // Regression tests based on previous runs.
+  EXPECT_NEAR(analyzer.stats().psnr.Mean(), 38, 2);
+  EXPECT_EQ(analyzer.stats().lost_count, 0);
+}
+
+TEST(ScenarioTest, RealTimeEncoding) {
+  VideoQualityAnalyzerConfig analyzer_config;
+  analyzer_config.psnr_coverage = 0.1;
+  VideoQualityAnalyzer analyzer(analyzer_config);
+  {
+    Scenario s("scenario/encode_real", true);
+    SetupVideoCall(s, &analyzer);
+    s.RunFor(TimeDelta::seconds(10));
+  }
+  // Regression tests based on previous runs.
+  EXPECT_NEAR(analyzer.stats().psnr.Mean(), 38, 2);
+  EXPECT_LT(analyzer.stats().lost_count, 2);
+}
+
+TEST(ScenarioTest, SimTimeFakeing) {
+  Scenario s("scenario/encode_sim", false);
+  SetupVideoCall(s, nullptr);
+  s.RunFor(TimeDelta::seconds(10));
+}
+
 }  // namespace test
 }  // namespace webrtc
diff --git a/test/scenario/video_stream.cc b/test/scenario/video_stream.cc
index 9a0b531073..7d1a26c757 100644
--- a/test/scenario/video_stream.cc
+++ b/test/scenario/video_stream.cc
@@ -336,7 +336,7 @@ VideoReceiveStream::Config CreateVideoReceiveStreamConfig(
 SendVideoStream::SendVideoStream(CallClient* sender,
                                  VideoStreamConfig config,
                                  Transport* send_transport,
-                                 VideoQualityAnalyzer* analyzer)
+                                 VideoFrameMatcher* matcher)
     : sender_(sender), config_(config) {
   video_capturer_ = absl::make_unique<FrameGeneratorCapturer>(
       sender_->clock_, CreateFrameGenerator(sender_->clock_, config.source),
@@ -395,14 +395,10 @@ SendVideoStream::SendVideoStream(CallClient* sender,
     send_stream_ = sender_->call_->CreateVideoSendStream(
         std::move(send_config), std::move(encoder_config));
   }
 
-  std::vector<std::function<void(const VideoFrameQualityInfo&)>>
-      frame_info_handlers;
-  if (config.analyzer.frame_quality_handler)
-    frame_info_handlers.push_back(config.analyzer.frame_quality_handler);
-  if (analyzer->Active()) {
-    frame_tap_.reset(new ForwardingCapturedFrameTap(sender_->clock_, analyzer,
-                                                    video_capturer_.get()));
+  if (matcher->Active()) {
+    frame_tap_ = absl::make_unique<ForwardingCapturedFrameTap>(
+        sender_->clock_, matcher, video_capturer_.get());
     send_stream_->SetSource(frame_tap_.get(),
                             config.encoder.degradation_preference);
   } else {
@@ -481,9 +477,8 @@ ReceiveVideoStream::ReceiveVideoStream(CallClient* receiver,
                                        SendVideoStream* send_stream,
                                        size_t chosen_stream,
                                        Transport* feedback_transport,
-                                       VideoQualityAnalyzer* analyzer)
+                                       VideoFrameMatcher* matcher)
     : receiver_(receiver), config_(config) {
-
   if (config.encoder.codec ==
       VideoStreamConfig::Encoder::Codec::kVideoCodecGeneric) {
     decoder_factory_ = absl::make_unique<FunctionVideoDecoderFactory>(
@@ -501,9 +496,9 @@ ReceiveVideoStream::ReceiveVideoStream(CallClient* receiver,
     num_streams = config.encoder.layers.spatial;
   for (size_t i = 0; i < num_streams; ++i) {
     rtc::VideoSinkInterface<VideoFrame>* renderer = &fake_renderer_;
-    if (analyzer->Active() && i == chosen_stream) {
-      analyzer_ = absl::make_unique<DecodedFrameTap>(analyzer);
-      renderer = analyzer_.get();
+    if (matcher->Active()) {
+      render_taps_.emplace_back(absl::make_unique<DecodedFrameTap>(matcher, i));
+      renderer = render_taps_.back().get();
     }
     auto recv_config = CreateVideoReceiveStreamConfig(
         config, feedback_transport, decoder, renderer,
@@ -556,21 +551,18 @@ void ReceiveVideoStream::Stop() {
 
 VideoStreamPair::~VideoStreamPair() = default;
 
-VideoStreamPair::VideoStreamPair(
-    CallClient* sender,
-    CallClient* receiver,
-    VideoStreamConfig config,
-    std::unique_ptr<RtcEventLogOutput> quality_writer)
+VideoStreamPair::VideoStreamPair(CallClient* sender,
+                                 CallClient* receiver,
+                                 VideoStreamConfig config)
     : config_(config),
-      analyzer_(std::move(quality_writer),
-                config.analyzer.frame_quality_handler),
-      send_stream_(sender, config, sender->transport_.get(), &analyzer_),
+      matcher_(config.hooks.frame_pair_handlers),
+      send_stream_(sender,
+                   config, sender->transport_.get(), &matcher_),
       receive_stream_(receiver,
                       config,
                       &send_stream_,
                       /*chosen_stream=*/0,
                       receiver->transport_.get(),
-                      &analyzer_) {}
+                      &matcher_) {}
 
 }  // namespace test
 }  // namespace webrtc
diff --git a/test/scenario/video_stream.h b/test/scenario/video_stream.h
index 3bd2498953..1c2bc11e47 100644
--- a/test/scenario/video_stream.h
+++ b/test/scenario/video_stream.h
@@ -48,7 +48,7 @@ class SendVideoStream {
   SendVideoStream(CallClient* sender,
                   VideoStreamConfig config,
                   Transport* send_transport,
-                  VideoQualityAnalyzer* analyzer);
+                  VideoFrameMatcher* matcher);
 
   rtc::CriticalSection crit_;
   std::vector<uint32_t> ssrcs_;
@@ -81,12 +81,13 @@ class ReceiveVideoStream {
                      SendVideoStream* send_stream,
                      size_t chosen_stream,
                      Transport* feedback_transport,
-                     VideoQualityAnalyzer* analyzer);
+                     VideoFrameMatcher* matcher);
 
   std::vector<VideoReceiveStream*> receive_streams_;
   FlexfecReceiveStream* flecfec_stream_ = nullptr;
   FakeVideoRenderer fake_renderer_;
-  std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>> analyzer_;
+  std::vector<std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>>>
+      render_taps_;
   CallClient* const receiver_;
   const VideoStreamConfig config_;
   std::unique_ptr<VideoDecoderFactory> decoder_factory_;
@@ -101,18 +102,17 @@ class VideoStreamPair {
   ~VideoStreamPair();
   SendVideoStream* send() { return &send_stream_; }
   ReceiveVideoStream* receive() { return &receive_stream_; }
-  VideoQualityAnalyzer* analyzer() { return &analyzer_; }
+  VideoFrameMatcher* matcher() { return &matcher_; }
 
  private:
   friend class Scenario;
   VideoStreamPair(CallClient* sender,
                   CallClient* receiver,
-                  VideoStreamConfig config,
-                  std::unique_ptr<RtcEventLogOutput> quality_writer);
+                  VideoStreamConfig config);
 
   const VideoStreamConfig config_;
-  VideoQualityAnalyzer analyzer_;
+  VideoFrameMatcher matcher_;
   SendVideoStream send_stream_;
   ReceiveVideoStream receive_stream_;
 };
diff --git a/test/scenario/video_stream_unittest.cc b/test/scenario/video_stream_unittest.cc
index dcb4e1bd93..936a518ecd 100644
--- a/test/scenario/video_stream_unittest.cc
+++ b/test/scenario/video_stream_unittest.cc
@@ -36,9 +36,8 @@ TEST(VideoStreamTest, DISABLED_ReceivesFramesFromFileBasedStreams) {
                               {s.CreateSimulationNode(NetworkNodeConfig())});
 
   s.CreateVideoStream(route->forward(), [&](VideoStreamConfig* c) {
-    c->analyzer.frame_quality_handler = [&](const VideoFrameQualityInfo&) {
-      frame_counts[0]++;
-    };
+    c->hooks.frame_pair_handlers = {
+        [&](const VideoFramePair&) { frame_counts[0]++; }};
     c->source.capture = Capture::kVideoFile;
     c->source.video_file.name = "foreman_cif";
     c->source.video_file.width = 352;
@@ -48,9 +47,8 @@ TEST(VideoStreamTest, DISABLED_ReceivesFramesFromFileBasedStreams) {
     c->encoder.codec = Codec::kVideoCodecVP8;
   });
   s.CreateVideoStream(route->forward(), [&](VideoStreamConfig* c) {
-    c->analyzer.frame_quality_handler = [&](const VideoFrameQualityInfo&) {
-      frame_counts[1]++;
-    };
+    c->hooks.frame_pair_handlers = {
+        [&](const VideoFramePair&) { frame_counts[1]++; }};
     c->source.capture = Capture::kImageSlides;
     c->source.slides.images.crop.width = 320;
     c->source.slides.images.crop.height = 240;
@@ -70,11 +68,14 @@ TEST(VideoStreamTest, DISABLED_ReceivesFramesFromFileBasedStreams) {
 }
 
 // TODO(srte): Enable this after resolving flakiness issues.
-TEST(VideoStreamTest, DISABLED_RecievesVp8SimulcastFrames) {
+TEST(VideoStreamTest, RecievesVp8SimulcastFrames) {
   TimeDelta kRunTime = TimeDelta::ms(500);
   int kFrameRate = 30;
 
-  std::atomic<int> frame_count(0);
+  std::deque<std::atomic<int>> frame_counts(3);
+  frame_counts[0] = 0;
+  frame_counts[1] = 0;
+  frame_counts[2] = 0;
   {
     Scenario s;
     auto route = s.CreateRoutes(s.CreateClient("caller", CallClientConfig()),
@@ -84,15 +85,18 @@ TEST(VideoStreamTest, DISABLED_RecievesVp8SimulcastFrames) {
     s.CreateVideoStream(route->forward(), [&](VideoStreamConfig* c) {
       // TODO(srte): Replace with code checking for all simulcast streams when
       // there's a hook available for that.
-      c->analyzer.frame_quality_handler = [&](const VideoFrameQualityInfo&) {
-        frame_count++;
-      };
+      c->hooks.frame_pair_handlers = {[&](const VideoFramePair& info) {
+        frame_counts[info.layer_id]++;
+        RTC_DCHECK(info.decoded);
+        printf("%i: [%3i->%3i, %i], %i->%i, \n", info.layer_id, info.capture_id,
+               info.decode_id, info.repeated, info.captured->width(),
+               info.decoded->width());
+      }};
       c->source.framerate = kFrameRate;
       // The resolution must be high enough to allow smaller layers to be
      // created.
       c->source.generator.width = 1024;
       c->source.generator.height = 768;
-
       c->encoder.implementation = CodecImpl::kSoftware;
       c->encoder.codec = Codec::kVideoCodecVP8;
       // By enabling multiple spatial layers, simulcast will be enabled for VP8.
@@ -101,11 +105,13 @@ TEST(VideoStreamTest, DISABLED_RecievesVp8SimulcastFrames) {
     s.RunFor(kRunTime);
   }
 
-  // Using 20% error margin to avoid flakyness.
+  // Using a high error margin to avoid flakiness.
   const int kExpectedCount =
-      static_cast<int>(kRunTime.seconds() * kFrameRate * 0.8);
+      static_cast<int>(kRunTime.seconds() * kFrameRate * 0.5);
 
-  EXPECT_GE(frame_count, kExpectedCount);
+  EXPECT_GE(frame_counts[0], kExpectedCount);
+  EXPECT_GE(frame_counts[1], kExpectedCount);
+  EXPECT_GE(frame_counts[2], kExpectedCount);
 }
 }  // namespace test
 }  // namespace webrtc
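The matching rule implemented by VideoFrameMatcher above can be hard to follow inside the diff, so here is a minimal, self-contained sketch of it (not part of the patch). Plain doubles stand in for the downscaled thumbnails, absolute difference stands in for I420SSE(), and all identifiers below are illustrative only. The sketch shows just the ordering logic: each captured frame keeps the best-scoring decoded frame seen so far, and it is emitted as a match as soon as a newer decoded frame scores worse, which also lets one decoded frame match several captured frames when a frame is dropped. The real code additionally runs on a task queue, keeps per-layer state, and reports the repeat count through VideoFramePair::repeated.

// quality_matching_sketch.cc -- illustrative only, not part of the commit.
#include <cmath>
#include <cstdio>
#include <deque>
#include <limits>
#include <vector>

namespace {

struct CapturedFrame {
  int id = 0;
  double thumb = 0;  // Stand-in for the scaled-down frame buffer.
  double best_score = std::numeric_limits<double>::infinity();
  int best_decode_id = -1;
  bool matched = false;  // True once a later decode scored worse than best.
};

}  // namespace

int main() {
  std::deque<CapturedFrame> captured_frames;
  int next_capture_id = 1;
  int next_decode_id = 1;
  double last_decode_thumb = 0;
  bool has_decode = false;

  // Captured "frames" form a slowly changing signal; decoded "frames" arrive
  // with a small distortion and one of them (3.0) was dropped.
  const std::vector<double> captures = {1.0, 2.0, 3.0, 4.0, 5.0};
  const std::vector<double> decodes = {1.1, 2.1, 4.1, 5.1};

  size_t decode_index = 0;
  for (double capture : captures) {
    // "OnCapturedFrame": score the new capture against the last decoded frame
    // so that late captures can still match an earlier decode.
    CapturedFrame frame;
    frame.id = next_capture_id++;
    frame.thumb = capture;
    if (has_decode) {
      frame.best_score = std::fabs(capture - last_decode_thumb);
      frame.best_decode_id = next_decode_id - 1;
    }
    captured_frames.push_back(frame);

    // "OnDecodedFrame": rescore all pending captures against the new decode.
    if (decode_index < decodes.size()) {
      const double decoded = decodes[decode_index++];
      const int decode_id = next_decode_id++;
      last_decode_thumb = decoded;
      has_decode = true;
      for (CapturedFrame& c : captured_frames) {
        double score = std::fabs(c.thumb - decoded);  // I420SSE stand-in.
        if (score < c.best_score) {
          c.best_score = score;
          c.best_decode_id = decode_id;
          c.matched = false;  // Still improving, keep waiting.
        } else {
          c.matched = true;  // A later decode is worse: best match is final.
        }
      }
      while (!captured_frames.empty() && captured_frames.front().matched) {
        const CapturedFrame& c = captured_frames.front();
        std::printf("capture %d -> decode %d (score %.2f)\n", c.id,
                    c.best_decode_id, c.best_score);
        captured_frames.pop_front();
      }
    }
  }
  // "Finalize()": flush whatever is still pending at the end of the run.
  for (const CapturedFrame& c : captured_frames) {
    std::printf("capture %d -> decode %d (flushed)\n", c.id, c.best_decode_id);
  }
  return 0;
}

With these inputs, capture 3 (whose encoded frame was dropped) re-matches decode 2 while every other capture pairs with its nearest decode; that repeated match is exactly what the patch counts in VideoFramePair::repeated, which is why the matching stays robust under timing variations and frame drops.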