diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index f4f2715bd8..c99498be6b 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -15,12 +15,10 @@
 #include "api/optional.h"
 #include "api/video/i420_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/codecs/test/video_codec_unittest.h"
 #include "modules/video_coding/codecs/vp8/include/vp8.h"
 #include "modules/video_coding/codecs/vp8/temporal_layers.h"
 #include "modules/video_coding/utility/vp8_header_parser.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/timeutils.h"
-#include "test/field_trial.h"
 #include "test/frame_utils.h"
 #include "test/gtest.h"
 #include "test/testsupport/fileutils.h"
@@ -45,102 +43,25 @@ void Calc16ByteAlignedStride(int width, int* stride_y, int* stride_uv) {
 }
 }  // namespace
 
-class EncodedImageCallbackTestImpl : public webrtc::EncodedImageCallback {
- public:
-  Result OnEncodedImage(const EncodedImage& encoded_frame,
-                        const CodecSpecificInfo* codec_specific_info,
-                        const RTPFragmentationHeader* fragmentation) override {
-    EXPECT_GT(encoded_frame._length, 0u);
-    VerifyQpParser(encoded_frame);
-
-    if (encoded_frame_._size != encoded_frame._size) {
-      delete[] encoded_frame_._buffer;
-      frame_buffer_.reset(new uint8_t[encoded_frame._size]);
-    }
-    RTC_DCHECK(frame_buffer_);
-    memcpy(frame_buffer_.get(), encoded_frame._buffer, encoded_frame._length);
-    encoded_frame_ = encoded_frame;
-    encoded_frame_._buffer = frame_buffer_.get();
-
-    // Skip |codec_name|, to avoid allocating.
-    EXPECT_STREQ("libvpx", codec_specific_info->codec_name);
-    EXPECT_EQ(kVideoCodecVP8, codec_specific_info->codecType);
-    EXPECT_EQ(0u, codec_specific_info->codecSpecific.VP8.simulcastIdx);
-    codec_specific_info_.codecType = codec_specific_info->codecType;
-    codec_specific_info_.codecSpecific = codec_specific_info->codecSpecific;
-    complete_ = true;
-    return Result(Result::OK, 0);
-  }
-
-  void VerifyQpParser(const EncodedImage& encoded_frame) const {
-    int qp;
-    ASSERT_TRUE(vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
-    EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
-  }
-
-  bool EncodeComplete() {
-    if (complete_) {
-      complete_ = false;
-      return true;
-    }
-    return false;
-  }
-
-  EncodedImage encoded_frame_;
-  CodecSpecificInfo codec_specific_info_;
-  std::unique_ptr<uint8_t[]> frame_buffer_;
-  bool complete_ = false;
-};
-
-class DecodedImageCallbackTestImpl : public webrtc::DecodedImageCallback {
- public:
-  int32_t Decoded(VideoFrame& frame) override {
-    RTC_NOTREACHED();
-    return -1;
-  }
-  int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
-    RTC_NOTREACHED();
-    return -1;
-  }
-  void Decoded(VideoFrame& frame,
-               rtc::Optional<int32_t> decode_time_ms,
-               rtc::Optional<uint8_t> qp) override {
-    EXPECT_GT(frame.width(), 0);
-    EXPECT_GT(frame.height(), 0);
-    EXPECT_TRUE(qp);
-    frame_ = frame;
-    qp_ = qp;
-    complete_ = true;
-  }
-
-  bool DecodeComplete() {
-    if (complete_) {
-      complete_ = false;
-      return true;
-    }
-    return false;
-  }
-
-  rtc::Optional<VideoFrame> frame_;
-  rtc::Optional<uint8_t> qp_;
-  bool complete_ = false;
-};
-
-class TestVp8Impl : public ::testing::Test {
- public:
-  TestVp8Impl() : TestVp8Impl("") {}
-  explicit TestVp8Impl(const std::string& field_trials)
-      : override_field_trials_(field_trials),
-        encoder_(VP8Encoder::Create()),
-        decoder_(VP8Decoder::Create()) {}
-  virtual ~TestVp8Impl() {}
-
+class TestVp8Impl : public VideoCodecUnitTest {
  protected:
-  virtual void SetUp() {
-    encoder_->RegisterEncodeCompleteCallback(&encoded_cb_);
-    decoder_->RegisterDecodeCompleteCallback(&decoded_cb_);
-    SetupCodecSettings();
-    SetupInputFrame();
+  std::unique_ptr<VideoEncoder> CreateEncoder() override {
+    return VP8Encoder::Create();
+  }
+
+  std::unique_ptr<VideoDecoder> CreateDecoder() override {
+    return VP8Decoder::Create();
+  }
+
+  VideoCodec codec_settings() override {
+    VideoCodec codec_settings;
+    webrtc::test::CodecSettings(kVideoCodecVP8, &codec_settings);
+    codec_settings.VP8()->denoisingOn = true;
+    codec_settings.VP8()->frameDroppingOn = false;
+    codec_settings.VP8()->automaticResizeOn = false;
+    codec_settings.VP8()->complexity = kComplexityNormal;
+    codec_settings.VP8()->tl_factory = &tl_factory_;
+    return codec_settings;
   }
 
   void SetupInputFrame() {
@@ -169,62 +90,51 @@ class TestVp8Impl : public ::testing::Test {
     fclose(file);
   }
 
-  void SetupCodecSettings() {
-    webrtc::test::CodecSettings(kVideoCodecVP8, &codec_settings_);
-    codec_settings_.maxBitrate = 4000;
-    codec_settings_.width = kWidth;
-    codec_settings_.height = kHeight;
-    codec_settings_.VP8()->denoisingOn = true;
-    codec_settings_.VP8()->frameDroppingOn = false;
-    codec_settings_.VP8()->automaticResizeOn = false;
-    codec_settings_.VP8()->complexity = kComplexityNormal;
-    codec_settings_.VP8()->tl_factory = &tl_factory_;
-  }
-
-  void InitEncodeDecode() {
-    EXPECT_EQ(
-        WEBRTC_VIDEO_CODEC_OK,
-        encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
-    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-              decoder_->InitDecode(&codec_settings_, kNumCores));
-  }
-
-  void EncodeFrame() {
-    EXPECT_FALSE(encoded_cb_.EncodeComplete());
+  void EncodeAndWaitForFrame(EncodedImage* encoded_frame,
+                             CodecSpecificInfo* codec_specific_info) {
     EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
               encoder_->Encode(*input_frame_, nullptr, nullptr));
-    EXPECT_TRUE(encoded_cb_.EncodeComplete());
+    ASSERT_TRUE(WaitForEncodedFrame(encoded_frame, codec_specific_info));
+    VerifyQpParser(*encoded_frame);
+    EXPECT_STREQ("libvpx", codec_specific_info->codec_name);
+    EXPECT_EQ(kVideoCodecVP8, codec_specific_info->codecType);
+    EXPECT_EQ(0u, codec_specific_info->codecSpecific.VP8.simulcastIdx);
   }
 
-  void ExpectFrameWith(int16_t picture_id,
-                       int tl0_pic_idx,
-                       uint8_t temporal_idx) {
+  void EncodeAndExpectFrameWith(int16_t picture_id,
+                                int tl0_pic_idx,
+                                uint8_t temporal_idx) {
+    EncodedImage encoded_frame;
+    CodecSpecificInfo codec_specific_info;
+    EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
    EXPECT_EQ(picture_id % (1 << 15),
-              encoded_cb_.codec_specific_info_.codecSpecific.VP8.pictureId);
+              codec_specific_info.codecSpecific.VP8.pictureId);
    EXPECT_EQ(tl0_pic_idx % (1 << 8),
-              encoded_cb_.codec_specific_info_.codecSpecific.VP8.tl0PicIdx);
-    EXPECT_EQ(temporal_idx,
-              encoded_cb_.codec_specific_info_.codecSpecific.VP8.temporalIdx);
+              codec_specific_info.codecSpecific.VP8.tl0PicIdx);
+    EXPECT_EQ(temporal_idx, codec_specific_info.codecSpecific.VP8.temporalIdx);
+  }
+
+  void VerifyQpParser(const EncodedImage& encoded_frame) const {
+    int qp;
+    EXPECT_GT(encoded_frame._length, 0u);
+    ASSERT_TRUE(vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+    EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
   }
 
-  test::ScopedFieldTrials override_field_trials_;
-  EncodedImageCallbackTestImpl encoded_cb_;
-  DecodedImageCallbackTestImpl decoded_cb_;
-  std::unique_ptr<VideoFrame> input_frame_;
-  const std::unique_ptr<VideoEncoder> encoder_;
-  const std::unique_ptr<VideoDecoder> decoder_;
-  VideoCodec codec_settings_;
   TemporalLayersFactory tl_factory_;
 };
 
 TEST_F(TestVp8Impl, SetRateAllocation) {
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
+
   const int kBitrateBps = 300000;
   BitrateAllocation bitrate_allocation;
   bitrate_allocation.SetBitrate(0, 0, kBitrateBps);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
             encoder_->SetRateAllocation(bitrate_allocation,
                                         codec_settings_.maxFramerate));
-  InitEncodeDecode();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             encoder_->SetRateAllocation(bitrate_allocation,
                                         codec_settings_.maxFramerate));
@@ -234,7 +144,11 @@ TEST_F(TestVp8Impl, EncodeFrameAndRelease) {
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
-  EncodeFrame();
+
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
             encoder_->Encode(*input_frame_, nullptr, nullptr));
@@ -247,13 +161,15 @@ TEST_F(TestVp8Impl, InitDecode) {
 }
 
 TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
-  InitEncodeDecode();
-  EncodeFrame();
-  EXPECT_EQ(kInitialTimestampRtp, encoded_cb_.encoded_frame_._timeStamp);
-  EXPECT_EQ(kInitialTimestampMs, encoded_cb_.encoded_frame_.capture_time_ms_);
-  EXPECT_EQ(kWidth, static_cast<int>(encoded_cb_.encoded_frame_._encodedWidth));
-  EXPECT_EQ(kHeight,
-            static_cast<int>(encoded_cb_.encoded_frame_._encodedHeight));
+  SetupInputFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+
+  EXPECT_EQ(kInitialTimestampRtp, encoded_frame._timeStamp);
+  EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
+  EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
+  EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
 }
 
 // We only test the encoder here, since the decoded frame rotation is set based
@@ -261,26 +177,34 @@ TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
 // TODO(brandtr): Consider passing through the rotation flag through the decoder
 // in the same way as done in the encoder.
 TEST_F(TestVp8Impl, EncodedRotationEqualsInputRotation) {
-  InitEncodeDecode();
   input_frame_->set_rotation(kVideoRotation_0);
-  EncodeFrame();
-  EXPECT_EQ(kVideoRotation_0, encoded_cb_.encoded_frame_.rotation_);
+
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+  EXPECT_EQ(kVideoRotation_0, encoded_frame.rotation_);
 
   input_frame_->set_rotation(kVideoRotation_90);
-  EncodeFrame();
-  EXPECT_EQ(kVideoRotation_90, encoded_cb_.encoded_frame_.rotation_);
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+  EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
 }
 
 TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
-  InitEncodeDecode();
-  EncodeFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+
   // First frame should be a key frame.
-  encoded_cb_.encoded_frame_._frameType = kVideoFrameKey;
+  encoded_frame._frameType = kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_cb_.encoded_frame_, false, nullptr));
-  EXPECT_TRUE(decoded_cb_.DecodeComplete());
-  EXPECT_GT(I420PSNR(input_frame_.get(), &*decoded_cb_.frame_), 36);
-  EXPECT_EQ(encoded_cb_.encoded_frame_.qp_, *decoded_cb_.qp_);
+            decoder_->Decode(encoded_frame, false, nullptr));
+  std::unique_ptr<VideoFrame> decoded_frame;
+  rtc::Optional<uint8_t> decoded_qp;
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  ASSERT_TRUE(decoded_qp);
+  EXPECT_GT(I420PSNR(input_frame_.get(), decoded_frame.get()), 36);
+  EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
 }
 
 TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
@@ -347,18 +271,25 @@ TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
 #define MAYBE_AlignedStrideEncodeDecode AlignedStrideEncodeDecode
 #endif
 TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
-  InitEncodeDecode();
-  EncodeFrame();
+  SetupInputFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+
   // First frame should be a key frame.
-  encoded_cb_.encoded_frame_._frameType = kVideoFrameKey;
-  encoded_cb_.encoded_frame_.ntp_time_ms_ = kTestNtpTimeMs;
+  encoded_frame._frameType = kVideoFrameKey;
+  encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_cb_.encoded_frame_, false, nullptr));
-  EXPECT_TRUE(decoded_cb_.DecodeComplete());
+            decoder_->Decode(encoded_frame, false, nullptr));
+
+  std::unique_ptr<VideoFrame> decoded_frame;
+  rtc::Optional<uint8_t> decoded_qp;
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
   // Compute PSNR on all planes (faster than SSIM).
-  EXPECT_GT(I420PSNR(input_frame_.get(), &*decoded_cb_.frame_), 36);
-  EXPECT_EQ(kInitialTimestampRtp, decoded_cb_.frame_->timestamp());
-  EXPECT_EQ(kTestNtpTimeMs, decoded_cb_.frame_->ntp_time_ms());
+  EXPECT_GT(I420PSNR(input_frame_.get(), decoded_frame.get()), 36);
+  EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
+  EXPECT_EQ(kTestNtpTimeMs, decoded_frame->ntp_time_ms());
 }
 
 #if defined(WEBRTC_ANDROID)
@@ -367,48 +298,53 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
 #define MAYBE_DecodeWithACompleteKeyFrame DecodeWithACompleteKeyFrame
 #endif
 TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
-  InitEncodeDecode();
-  EncodeFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+
   // Setting complete to false -> should return an error.
-  encoded_cb_.encoded_frame_._completeFrame = false;
+  encoded_frame._completeFrame = false;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_cb_.encoded_frame_, false, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr));
   // Setting complete back to true. Forcing a delta frame.
-  encoded_cb_.encoded_frame_._frameType = kVideoFrameDelta;
-  encoded_cb_.encoded_frame_._completeFrame = true;
+  encoded_frame._frameType = kVideoFrameDelta;
+  encoded_frame._completeFrame = true;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_cb_.encoded_frame_, false, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr));
   // Now setting a key frame.
-  encoded_cb_.encoded_frame_._frameType = kVideoFrameKey;
+  encoded_frame._frameType = kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_cb_.encoded_frame_, false, nullptr));
-  ASSERT_TRUE(decoded_cb_.frame_);
-  EXPECT_GT(I420PSNR(input_frame_.get(), &*decoded_cb_.frame_), 36);
+            decoder_->Decode(encoded_frame, false, nullptr));
+  std::unique_ptr<VideoFrame> decoded_frame;
+  rtc::Optional<uint8_t> decoded_qp;
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  EXPECT_GT(I420PSNR(input_frame_.get(), decoded_frame.get()), 36);
 }
 
 TEST_F(TestVp8Impl, EncoderWith2TemporalLayersRetainsRtpStateAfterRelease) {
   codec_settings_.VP8()->numberOfTemporalLayers = 2;
-  InitEncodeDecode();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
 
   // Temporal layer 0.
-  EncodeFrame();
-  EXPECT_EQ(0, encoded_cb_.codec_specific_info_.codecSpecific.VP8.temporalIdx);
-  int16_t picture_id =
-      encoded_cb_.codec_specific_info_.codecSpecific.VP8.pictureId;
-  int tl0_pic_idx =
-      encoded_cb_.codec_specific_info_.codecSpecific.VP8.tl0PicIdx;
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
+
+  EXPECT_EQ(0, codec_specific_info.codecSpecific.VP8.temporalIdx);
+  const int16_t picture_id = codec_specific_info.codecSpecific.VP8.pictureId;
+  const int tl0_pic_idx = codec_specific_info.codecSpecific.VP8.tl0PicIdx;
   // Temporal layer 1.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 1, tl0_pic_idx + 0, 1);
+
+  EncodeAndExpectFrameWith(picture_id + 1, tl0_pic_idx + 0, 1);
   // Temporal layer 0.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 2, tl0_pic_idx + 1, 0);
+  EncodeAndExpectFrameWith(picture_id + 2, tl0_pic_idx + 1, 0);
   // Temporal layer 1.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 3, tl0_pic_idx + 1, 1);
+  EncodeAndExpectFrameWith(picture_id + 3, tl0_pic_idx + 1, 1);
 
   // Reinit.
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
@@ -417,45 +353,40 @@ TEST_F(TestVp8Impl, EncoderWith2TemporalLayersRetainsRtpStateAfterRelease) {
 
   // Temporal layer 0.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 4, tl0_pic_idx + 2, 0);
+  EncodeAndExpectFrameWith(picture_id + 4, tl0_pic_idx + 2, 0);
   // Temporal layer 1.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 5, tl0_pic_idx + 2, 1);
+  EncodeAndExpectFrameWith(picture_id + 5, tl0_pic_idx + 2, 1);
   // Temporal layer 0.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 6, tl0_pic_idx + 3, 0);
+  EncodeAndExpectFrameWith(picture_id + 6, tl0_pic_idx + 3, 0);
   // Temporal layer 1.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 7, tl0_pic_idx + 3, 1);
+  EncodeAndExpectFrameWith(picture_id + 7, tl0_pic_idx + 3, 1);
 }
 
 TEST_F(TestVp8Impl, EncoderWith3TemporalLayersRetainsRtpStateAfterRelease) {
   codec_settings_.VP8()->numberOfTemporalLayers = 3;
-  InitEncodeDecode();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
+
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
 
   // Temporal layer 0.
-  EncodeFrame();
-  EXPECT_EQ(0, encoded_cb_.codec_specific_info_.codecSpecific.VP8.temporalIdx);
-  int16_t picture_id =
-      encoded_cb_.codec_specific_info_.codecSpecific.VP8.pictureId;
-  int tl0_pic_idx =
-      encoded_cb_.codec_specific_info_.codecSpecific.VP8.tl0PicIdx;
+  EXPECT_EQ(0, codec_specific_info.codecSpecific.VP8.temporalIdx);
+  const int16_t picture_id = codec_specific_info.codecSpecific.VP8.pictureId;
+  const int tl0_pic_idx = codec_specific_info.codecSpecific.VP8.tl0PicIdx;
  // Temporal layer 2.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 1, tl0_pic_idx + 0, 2);
+  EncodeAndExpectFrameWith(picture_id + 1, tl0_pic_idx + 0, 2);
   // Temporal layer 1.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 2, tl0_pic_idx + 0, 1);
+  EncodeAndExpectFrameWith(picture_id + 2, tl0_pic_idx + 0, 1);
   // Temporal layer 2.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 3, tl0_pic_idx + 0, 2);
+  EncodeAndExpectFrameWith(picture_id + 3, tl0_pic_idx + 0, 2);
 
   // Reinit.
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
@@ -464,20 +395,16 @@ TEST_F(TestVp8Impl, EncoderWith3TemporalLayersRetainsRtpStateAfterRelease) {
 
   // Temporal layer 0.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 4, tl0_pic_idx + 1, 0);
+  EncodeAndExpectFrameWith(picture_id + 4, tl0_pic_idx + 1, 0);
   // Temporal layer 2.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 5, tl0_pic_idx + 1, 2);
+  EncodeAndExpectFrameWith(picture_id + 5, tl0_pic_idx + 1, 2);
   // Temporal layer 1.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 6, tl0_pic_idx + 1, 1);
+  EncodeAndExpectFrameWith(picture_id + 6, tl0_pic_idx + 1, 1);
   // Temporal layer 2.
   input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
-  EncodeFrame();
-  ExpectFrameWith(picture_id + 7, tl0_pic_idx + 1, 2);
+  EncodeAndExpectFrameWith(picture_id + 7, tl0_pic_idx + 1, 2);
 }
 
 TEST_F(TestVp8Impl, ScalingDisabledIfAutomaticResizeOff) {