Delete CodecSpecificInfo argument from VideoEncoder::Encode

Bug: webrtc:10379
Change-Id: If9f92eb1e5891df284881082c53f0b1db1c26a38
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/125900
Reviewed-by: Kári Helgason <kthelgason@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Commit-Queue: Niels Möller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26992}
Niels Möller 2019-03-06 12:00:33 +01:00 committed by Commit Bot
parent 1e42761b39
commit c8d2e73ed0
23 changed files with 148 additions and 168 deletions
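
The change is mechanical at every call site below: the CodecSpecificInfo* argument, which the call sites in this diff either pass as nullptr or forward unchanged, is dropped from VideoEncoder::Encode(). A before/after sketch (encoder, frame, and frame_types are illustrative placeholders for any VideoEncoder*, input VideoFrame, and std::vector<FrameType>, not names taken from this diff):

    // Before this change: a CodecSpecificInfo* that encode paths did not use,
    // typically passed as nullptr.
    encoder->Encode(frame, nullptr, &frame_types);
    // After this change:
    encoder->Encode(frame, &frame_types);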

View File

@@ -189,7 +189,7 @@ void VideoEncoderSoftwareFallbackWrapperTest::EncodeFrame(int expected_ret) {
.set_rotation(webrtc::kVideoRotation_0)
.set_timestamp_us(0)
.build());
EXPECT_EQ(expected_ret, fallback_wrapper_->Encode(*frame_, nullptr, &types));
EXPECT_EQ(expected_ret, fallback_wrapper_->Encode(*frame_, &types));
}
void VideoEncoderSoftwareFallbackWrapperTest::UtilizeFallbackEncoder() {
@@ -295,8 +295,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
// Encoding a frame using the fallback should arrive at the new callback.
std::vector<FrameType> types(1, kVideoFrameKey);
frame_->set_timestamp(frame_->timestamp() + 1000);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
fallback_wrapper_->Encode(*frame_, nullptr, &types));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_->Encode(*frame_, &types));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_->Release());
}
@@ -605,7 +604,7 @@ TEST(SoftwareFallbackEncoderTest, HwRateControllerTrusted) {
VideoFrame frame = VideoFrame::Builder()
.set_video_frame_buffer(I420Buffer::Create(100, 100))
.build();
wrapper->Encode(frame, nullptr, nullptr);
wrapper->Encode(frame, nullptr);
EXPECT_FALSE(wrapper->GetEncoderInfo().has_trusted_rate_controller);
}
@@ -645,7 +644,7 @@ TEST(SoftwareFallbackEncoderTest, ReportsHardwareAccelerated) {
VideoFrame frame = VideoFrame::Builder()
.set_video_frame_buffer(I420Buffer::Create(100, 100))
.build();
wrapper->Encode(frame, nullptr, nullptr);
wrapper->Encode(frame, nullptr);
EXPECT_FALSE(wrapper->GetEncoderInfo().is_hardware_accelerated);
}
@@ -669,7 +668,7 @@ TEST(SoftwareFallbackEncoderTest, ReportsInternalSource) {
VideoFrame frame = VideoFrame::Builder()
.set_video_frame_buffer(I420Buffer::Create(100, 100))
.build();
wrapper->Encode(frame, nullptr, nullptr);
wrapper->Encode(frame, nullptr);
EXPECT_FALSE(wrapper->GetEncoderInfo().has_internal_source);
}

View File

@@ -101,6 +101,19 @@ VideoEncoder::EncoderInfo::EncoderInfo(const EncoderInfo&) = default;
VideoEncoder::EncoderInfo::~EncoderInfo() = default;
// Implementations of the interface must implement one or the other of these two
// methods.
int32_t VideoEncoder::Encode(const VideoFrame& frame,
const std::vector<FrameType>* frame_types) {
return Encode(frame, nullptr, frame_types);
}
int32_t VideoEncoder::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
return Encode(frame, frame_types);
}
int32_t VideoEncoder::SetRates(uint32_t bitrate, uint32_t framerate) {
RTC_NOTREACHED() << "SetRate(uint32_t, uint32_t) is deprecated.";
return -1;
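
The two default implementations above delegate to each other, a standard interface-migration shim: a subclass that still overrides the old three-argument Encode() keeps working, a migrated subclass overrides the new two-argument form, and a subclass that overrides neither would simply recurse between the two defaults, hence the comment that implementations must provide one or the other. A minimal sketch of a downstream encoder migrated to the new signature, assuming the remaining pure-virtual methods at this revision are InitEncode(), RegisterEncodeCompleteCallback(), and Release() (MyEncoder and its stub bodies are hypothetical, not part of this change):

    #include <vector>
    #include "api/video_codecs/video_encoder.h"
    #include "modules/video_coding/include/video_error_codes.h"

    class MyEncoder : public webrtc::VideoEncoder {
     public:
      int32_t InitEncode(const webrtc::VideoCodec* codec_settings,
                         int32_t number_of_cores,
                         size_t max_payload_size) override {
        return WEBRTC_VIDEO_CODEC_OK;
      }
      int32_t RegisterEncodeCompleteCallback(
          webrtc::EncodedImageCallback* callback) override {
        callback_ = callback;
        return WEBRTC_VIDEO_CODEC_OK;
      }
      int32_t Release() override { return WEBRTC_VIDEO_CODEC_OK; }
      // Overriding only the new two-argument Encode() is enough; the old
      // three-argument overload now defaults to forwarding here.
      int32_t Encode(const webrtc::VideoFrame& frame,
                     const std::vector<webrtc::FrameType>* frame_types) override {
        // A real encoder would compress |frame| and deliver the result through
        // |callback_|; this stub only demonstrates the new signature.
        return WEBRTC_VIDEO_CODEC_OK;
      }

     private:
      webrtc::EncodedImageCallback* callback_ = nullptr;
    };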

View File

@@ -241,9 +241,13 @@ class RTC_EXPORT VideoEncoder {
// WEBRTC_VIDEO_CODEC_ERR_PARAMETER
// WEBRTC_VIDEO_CODEC_MEMORY
// WEBRTC_VIDEO_CODEC_ERROR
virtual int32_t Encode(const VideoFrame& frame,
const std::vector<FrameType>* frame_types);
// TODO(bugs.webrtc.org/10379): Deprecated. Delete, and make above method pure
// virtual, as soon as downstream applications are updated.
virtual int32_t Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) = 0;
const std::vector<FrameType>* frame_types);
// Inform the encoder about the new target bit rate.
//
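
Per the TODO above, the intended end state, once downstream implementations have migrated, is a single pure-virtual method. A sketch of that eventual declaration (the stated goal, not part of this change):

    virtual int32_t Encode(const VideoFrame& frame,
                           const std::vector<FrameType>* frame_types) = 0;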

View File

@@ -88,7 +88,6 @@ class VideoEncoderSoftwareFallbackWrapper final : public VideoEncoder {
int32_t Release() override;
int32_t Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) override;
int32_t SetRateAllocation(const VideoBitrateAllocation& bitrate_allocation,
uint32_t framerate) override;
@@ -253,16 +252,15 @@ int32_t VideoEncoderSoftwareFallbackWrapper::Release() {
int32_t VideoEncoderSoftwareFallbackWrapper::Encode(
const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
if (use_fallback_encoder_)
return fallback_encoder_->Encode(frame, codec_specific_info, frame_types);
int32_t ret = encoder_->Encode(frame, codec_specific_info, frame_types);
return fallback_encoder_->Encode(frame, frame_types);
int32_t ret = encoder_->Encode(frame, frame_types);
// If requested, try a software fallback.
bool fallback_requested = (ret == WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
if (fallback_requested && InitFallbackEncoder()) {
// Start using the fallback with this frame.
return fallback_encoder_->Encode(frame, codec_specific_info, frame_types);
return fallback_encoder_->Encode(frame, frame_types);
}
return ret;
}
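
The fallback logic itself is unchanged: the primary encoder is tried first, a WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE return initializes the software encoder, and the same frame is re-encoded there; only the forwarded signature shrinks. A usage sketch, assuming the wrapper is obtained through CreateVideoEncoderSoftwareFallbackWrapper() from api/video_codecs/video_encoder_software_fallback_wrapper.h, with sw, hw, and frame as caller-provided placeholders:

    // |sw| and |hw| are std::unique_ptr<webrtc::VideoEncoder> instances
    // produced by real encoder factories elsewhere.
    std::unique_ptr<webrtc::VideoEncoder> wrapper =
        webrtc::CreateVideoEncoderSoftwareFallbackWrapper(std::move(sw),
                                                          std::move(hw));
    std::vector<webrtc::FrameType> frame_types(1, webrtc::kVideoFrameKey);
    // After this change the encode call carries no CodecSpecificInfo*:
    int32_t ret = wrapper->Encode(frame, &frame_types);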

View File

@@ -44,9 +44,8 @@ int EncoderSimulcastProxy::InitEncode(const VideoCodec* inst,
}
int EncoderSimulcastProxy::Encode(const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
return encoder_->Encode(input_image, codec_specific_info, frame_types);
return encoder_->Encode(input_image, frame_types);
}
int EncoderSimulcastProxy::RegisterEncodeCompleteCallback(

View File

@@ -46,7 +46,6 @@ class EncoderSimulcastProxy : public VideoEncoder {
int number_of_cores,
size_t max_payload_size) override;
int Encode(const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) override;
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
int SetRateAllocation(const VideoBitrateAllocation& bitrate,

View File

@@ -338,7 +338,6 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
int SimulcastEncoderAdapter::Encode(
const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_);
@@ -399,8 +398,8 @@ int SimulcastEncoderAdapter::Encode(
if ((dst_width == src_width && dst_height == src_height) ||
input_image.video_frame_buffer()->type() ==
VideoFrameBuffer::Type::kNative) {
int ret = streaminfos_[stream_idx].encoder->Encode(
input_image, codec_specific_info, &stream_frame_types);
int ret = streaminfos_[stream_idx].encoder->Encode(input_image,
&stream_frame_types);
if (ret != WEBRTC_VIDEO_CODEC_OK) {
return ret;
}
@@ -426,8 +425,8 @@ int SimulcastEncoderAdapter::Encode(
.set_rotation(webrtc::kVideoRotation_0)
.set_timestamp_ms(input_image.render_time_ms())
.build();
int ret = streaminfos_[stream_idx].encoder->Encode(
frame, codec_specific_info, &stream_frame_types);
int ret =
streaminfos_[stream_idx].encoder->Encode(frame, &stream_frame_types);
if (ret != WEBRTC_VIDEO_CODEC_OK) {
return ret;
}

View File

@@ -45,7 +45,6 @@ class SimulcastEncoderAdapter : public VideoEncoder {
int number_of_cores,
size_t max_payload_size) override;
int Encode(const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) override;
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
int SetRateAllocation(const VideoBitrateAllocation& bitrate,

View File

@@ -574,7 +574,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
EXPECT_CALL(*original_encoders[2], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(3, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*original_encoders[1], Release())
@@ -599,7 +599,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
EXPECT_CALL(*original_encoders[1], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(2, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*original_encoders[1], Release())
@@ -619,7 +619,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
EXPECT_CALL(*original_encoders[0], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(1, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_EQ(0, adapter_->Release());
@@ -643,7 +643,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
EXPECT_CALL(*new_encoders[2], Encode(_, _, _))
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
frame_types.resize(3, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
EXPECT_CALL(*original_encoders[0], Release())
.WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(*new_encoders[1], Release())
@@ -891,7 +891,7 @@ TEST_F(TestSimulcastEncoderAdapterFake,
for (MockVideoEncoder* encoder : helper_->factory()->encoders())
EXPECT_CALL(*encoder, Encode(::testing::Ref(input_frame), _, _)).Times(1);
std::vector<FrameType> frame_types(3, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
}
TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
@@ -918,7 +918,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
.build();
std::vector<FrameType> frame_types(3, kVideoFrameKey);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE,
adapter_->Encode(input_frame, nullptr, &frame_types));
adapter_->Encode(input_frame, &frame_types));
}
TEST_F(TestSimulcastEncoderAdapterFake, TestInitFailureCleansUpEncoders) {
@@ -1033,7 +1033,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ActivatesCorrectStreamsInInitEncode) {
std::vector<FrameType> frame_types;
frame_types.resize(3, kVideoFrameKey);
EXPECT_EQ(0, adapter_->Encode(input_frame, nullptr, &frame_types));
EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
}
TEST_F(TestSimulcastEncoderAdapterFake, TrustedRateControl) {

View File

@@ -65,8 +65,7 @@ class TestH264Impl : public VideoCodecUnitTest {
TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
VideoFrame* input_frame = NextInputFrame();
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -93,7 +92,7 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -111,8 +110,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
TEST_F(TestH264Impl, MAYBE_EncodedColorSpaceEqualsInputColorSpace) {
VideoFrame* input_frame = NextInputFrame();
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -127,7 +125,7 @@ TEST_F(TestH264Impl, MAYBE_EncodedColorSpaceEqualsInputColorSpace) {
.build();
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(input_frame_w_color_space, nullptr, nullptr));
encoder_->Encode(input_frame_w_color_space, nullptr));
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
ASSERT_TRUE(encoded_frame.ColorSpace());
EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
@@ -135,7 +133,7 @@ TEST_F(TestH264Impl, MAYBE_EncodedColorSpaceEqualsInputColorSpace) {
TEST_F(TestH264Impl, MAYBE_DecodedColorSpaceEqualsEncodedColorSpace) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));

View File

@@ -43,7 +43,6 @@ class MultiplexEncoderAdapter : public VideoEncoder {
int number_of_cores,
size_t max_payload_size) override;
int Encode(const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) override;
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
int SetRateAllocation(const VideoBitrateAllocation& bitrate,

View File

@@ -138,7 +138,6 @@ int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
int MultiplexEncoderAdapter::Encode(
const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
if (!encoded_complete_callback_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
@@ -181,8 +180,7 @@ int MultiplexEncoderAdapter::Encode(
++picture_index_;
// Encode YUV
int rv = encoders_[kYUVStream]->Encode(input_image, codec_specific_info,
&adjusted_frame_types);
int rv = encoders_[kYUVStream]->Encode(input_image, &adjusted_frame_types);
// If we do not receive an alpha frame, we send a single frame for this
// |picture_index_|. The receiver will receive |frame_count| as 1 which
@@ -208,8 +206,7 @@ int MultiplexEncoderAdapter::Encode(
.set_rotation(input_image.rotation())
.set_id(input_image.id())
.build();
rv = encoders_[kAXXStream]->Encode(alpha_image, codec_specific_info,
&adjusted_frame_types);
rv = encoders_[kAXXStream]->Encode(alpha_image, &adjusted_frame_types);
return rv;
}

View File

@@ -219,8 +219,7 @@ TEST_P(TestMultiplexAdapter, ConstructAndDestructEncoder) {
TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -238,8 +237,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*yuva_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -264,8 +262,7 @@ TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -284,8 +281,7 @@ TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*yuva_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -311,8 +307,7 @@ TEST_P(TestMultiplexAdapter, ImageIndexIncreases) {
std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
const size_t expected_num_encoded_frames = 3;
for (size_t i = 0; i < expected_num_encoded_frames; ++i) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*yuva_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));

View File

@@ -288,8 +288,7 @@ void VideoProcessor::ProcessFrame() {
const std::vector<FrameType> frame_types =
(frame_number == 0) ? std::vector<FrameType>{kVideoFrameKey}
: std::vector<FrameType>{kVideoFrameDelta};
const int encode_return_code =
encoder_->Encode(input_frame, nullptr, &frame_types);
const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
frame_stat->encode_return_code = encode_return_code;

View File

@@ -81,7 +81,7 @@ class TestVp8Impl : public VideoCodecUnitTest {
frame_types.emplace_back(FrameType::kVideoFrameDelta);
}
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(input_frame, nullptr, &frame_types));
encoder_->Encode(input_frame, &frame_types));
ASSERT_TRUE(WaitForEncodedFrame(encoded_frame, codec_specific_info));
VerifyQpParser(*encoded_frame);
VideoEncoder::EncoderInfo encoder_info = encoder_->GetEncoderInfo();
@@ -138,7 +138,7 @@ TEST_F(TestVp8Impl, EncodeFrameAndRelease) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
}
TEST_F(TestVp8Impl, InitDecode) {

View File

@@ -122,8 +122,7 @@ TEST_F(TestVp9Impl, DISABLED_EncodeDecode) {
TEST_F(TestVp9Impl, EncodeDecode) {
#endif
VideoFrame* input_frame = NextInputFrame();
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -155,8 +154,7 @@ TEST_F(TestVp9Impl, EncodeDecode) {
TEST_F(TestVp9Impl, EncodedRotationEqualsInputRotation) {
VideoFrame* input_frame = NextInputFrame();
input_frame->set_rotation(kVideoRotation_0);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -164,8 +162,7 @@ TEST_F(TestVp9Impl, EncodedRotationEqualsInputRotation) {
input_frame = NextInputFrame();
input_frame->set_rotation(kVideoRotation_90);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
}
@@ -173,8 +170,7 @@ TEST_F(TestVp9Impl, EncodedRotationEqualsInputRotation) {
TEST_F(TestVp9Impl, EncodedColorSpaceEqualsInputColorSpace) {
// Video frame without explicit color space information.
VideoFrame* input_frame = NextInputFrame();
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -188,7 +184,7 @@ TEST_F(TestVp9Impl, EncodedColorSpaceEqualsInputColorSpace) {
.set_color_space(color_space)
.build();
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(input_frame_w_hdr, nullptr, nullptr));
encoder_->Encode(input_frame_w_hdr, nullptr));
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
ASSERT_TRUE(encoded_frame.ColorSpace());
EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
@@ -196,7 +192,7 @@ TEST_F(TestVp9Impl, EncodedColorSpaceEqualsInputColorSpace) {
TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -226,7 +222,7 @@ TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -244,7 +240,7 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
TEST_F(TestVp9Impl, ParserQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -265,7 +261,7 @@ TEST_F(TestVp9Impl, EncoderWith2TemporalLayers) {
// Temporal layer 0.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -273,17 +269,17 @@ TEST_F(TestVp9Impl, EncoderWith2TemporalLayers) {
// Temporal layer 1.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ExpectFrameWith(1);
// Temporal layer 0.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ExpectFrameWith(0);
// Temporal layer 1.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ExpectFrameWith(1);
}
@@ -295,7 +291,7 @@ TEST_F(TestVp9Impl, EncoderWith2SpatialLayers) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frame;
std::vector<CodecSpecificInfo> codec_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_info));
@@ -386,7 +382,7 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
SetWaitForEncodedFramesThreshold(sl_idx + 1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frame;
std::vector<CodecSpecificInfo> codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
@@ -405,7 +401,7 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) {
for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
SetWaitForEncodedFramesThreshold(sl_idx);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frame;
std::vector<CodecSpecificInfo> codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
@@ -435,7 +431,7 @@ TEST_F(TestVp9Impl, EndOfPicture) {
codec_settings_.maxFramerate));
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> frames;
std::vector<CodecSpecificInfo> codec_specific;
@@ -455,7 +451,7 @@ TEST_F(TestVp9Impl, EndOfPicture) {
SetWaitForEncodedFramesThreshold(1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
EXPECT_FALSE(frames[0].SpatialIndex());
@@ -489,7 +485,7 @@ TEST_F(TestVp9Impl, InterLayerPred) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> frames;
std::vector<CodecSpecificInfo> codec_specific;
@@ -505,7 +501,7 @@ TEST_F(TestVp9Impl, InterLayerPred) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific));
// Delta frame.
@@ -550,7 +546,7 @@ TEST_F(TestVp9Impl,
++frame_num) {
SetWaitForEncodedFramesThreshold(sl_idx + 1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frame;
std::vector<CodecSpecificInfo> codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
@@ -606,7 +602,7 @@ TEST_F(TestVp9Impl,
++frame_num) {
SetWaitForEncodedFramesThreshold(sl_idx + 1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frame;
std::vector<CodecSpecificInfo> codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
@@ -667,7 +663,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
for (int i = 0; i < 3; ++i) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 2u);
}
@@ -682,7 +678,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
// Encode 1 frame.
SetWaitForEncodedFramesThreshold(1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 1u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
@@ -701,7 +697,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) {
// Encode 1 frame.
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 2u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
@@ -745,7 +741,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
for (int i = 0; i < 3; ++i) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 2u);
}
@@ -761,7 +757,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
for (int i = 0; i < 11; ++i) {
SetWaitForEncodedFramesThreshold(1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 1u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
@@ -782,7 +778,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) {
// Encode 1 frame.
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
ASSERT_EQ(codec_specific_info.size(), 2u);
EXPECT_EQ(encoded_frame[0]._frameType, kVideoFrameDelta);
@@ -832,7 +828,7 @@ TEST_F(TestVp9Impl, EnablingNewLayerIsDelayedInScreenshareAndAddsSsInfo) {
++frame_num) {
SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frames;
std::vector<CodecSpecificInfo> codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
@@ -850,7 +846,7 @@ TEST_F(TestVp9Impl, EnablingNewLayerIsDelayedInScreenshareAndAddsSsInfo) {
for (size_t frame_num = 0; frame_num < num_dropped_frames; ++frame_num) {
SetWaitForEncodedFramesThreshold(1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
// First layer is dropped due to frame rate cap. The last layer should not
// be enabled yet.
std::vector<EncodedImage> encoded_frames;
@@ -860,7 +856,7 @@ TEST_F(TestVp9Impl, EnablingNewLayerIsDelayedInScreenshareAndAddsSsInfo) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
// Now all 3 layers should be encoded.
std::vector<EncodedImage> encoded_frames;
std::vector<CodecSpecificInfo> codec_specific_info;
@@ -910,7 +906,7 @@ TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) {
++frame_num) {
SetWaitForEncodedFramesThreshold(num_spatial_layers);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frames;
std::vector<CodecSpecificInfo> codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
@@ -920,7 +916,7 @@ TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) {
for (size_t frame_num = 0; frame_num < num_dropped_frames - 2; ++frame_num) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
// First layer is dropped due to frame rate cap. The last layer should not
// be enabled yet.
std::vector<EncodedImage> encoded_frames;
@@ -942,7 +938,7 @@ TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) {
// Expect back one frame.
SetWaitForEncodedFramesThreshold(1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
// First layer is dropped due to frame rate cap. The last layer should not
// be enabled yet.
std::vector<EncodedImage> encoded_frames;
@@ -956,7 +952,7 @@ TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) {
SetWaitForEncodedFramesThreshold(2);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
std::vector<EncodedImage> encoded_frames;
std::vector<CodecSpecificInfo> codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
@@ -1000,7 +996,7 @@ TEST_F(TestVp9Impl, DisableNewLayerInVideoDelaysSsInfoTillTL0) {
// Encode one TL0 frame
SetWaitForEncodedFramesThreshold(num_spatial_layers);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0u);
@@ -1016,14 +1012,14 @@ TEST_F(TestVp9Impl, DisableNewLayerInVideoDelaysSsInfoTillTL0) {
// is not provided here.
SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 1u);
// Next is TL0 frame, which should have delayed SS structure.
SetWaitForEncodedFramesThreshold(num_spatial_layers - 1);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
EXPECT_EQ(codec_specific_info[0].codecSpecific.VP9.temporal_idx, 0u);
EXPECT_TRUE(codec_specific_info[0].codecSpecific.VP9.ss_data_available);
@@ -1051,7 +1047,7 @@ TEST_F(TestVp9Impl,
encoder_->SetRateAllocation(bitrate_allocation,
codec_settings_.maxFramerate));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_info));
@@ -1066,7 +1062,7 @@ TEST_F(TestVp9Impl, ScalabilityStructureIsAvailableInFlexibleMode) {
0 /* max payload size (unused) */));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
@@ -1186,7 +1182,7 @@ TEST_P(TestVp9ImplWithLayering, FlexibleMode) {
++frame_num) {
SetWaitForEncodedFramesThreshold(num_spatial_layers_);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
const bool is_key_frame = frame_num == 0;
const size_t gof_idx = frame_num % gof.num_frames_in_gof;
@@ -1225,7 +1221,7 @@ TEST_P(TestVp9ImplWithLayering, ExternalRefControl) {
++frame_num) {
SetWaitForEncodedFramesThreshold(num_spatial_layers_);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*NextInputFrame(), nullptr, nullptr));
encoder_->Encode(*NextInputFrame(), nullptr));
const bool is_key_frame = frame_num == 0;
const size_t gof_idx = frame_num % gof.num_frames_in_gof;
@@ -1269,8 +1265,7 @@ TEST_F(TestVp9ImplFrameDropping, PreEncodeFrameDropping) {
VideoFrame* input_frame = NextInputFrame();
for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
const size_t timestamp = input_frame->timestamp() +
kVideoPayloadTypeFrequency / input_framerate_fps;
input_frame->set_timestamp(static_cast<uint32_t>(timestamp));
@@ -1324,8 +1319,7 @@ TEST_F(TestVp9ImplFrameDropping, DifferentFrameratePerSpatialLayer) {
VideoFrame* input_frame = NextInputFrame();
for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
const size_t timestamp = input_frame->timestamp() +
kVideoPayloadTypeFrequency / input_framerate_fps;
input_frame->set_timestamp(static_cast<uint32_t>(timestamp));
@@ -1380,8 +1374,7 @@ TEST_F(TestVp9ImplFrameDropping, LayerMaxFramerateIsCappedByCodecMaxFramerate) {
VideoFrame* input_frame = NextInputFrame();
for (size_t frame_num = 0; frame_num < num_input_frames; ++frame_num) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
const size_t timestamp = input_frame->timestamp() +
kVideoPayloadTypeFrequency / input_framerate_fps;
input_frame->set_timestamp(static_cast<uint32_t>(timestamp));
@@ -1432,8 +1425,7 @@ TEST_F(TestVp9ImplProfile2, EncodeDecode) {
return;
VideoFrame* input_frame = NextInputFrame();
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame, nullptr, nullptr));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));

View File

@@ -303,11 +303,11 @@ void SimulcastTestFixtureImpl::RunActiveStreamsTest(
ExpectStreams(kVideoFrameKey, active_streams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, active_streams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::UpdateActiveStreams(
@@ -399,33 +399,33 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
frame_types[0] = kVideoFrameKey;
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
frame_types[1] = kVideoFrameKey;
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
frame_types[2] = kVideoFrameKey;
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
@@ -434,11 +434,11 @@ void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 1);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
@@ -447,11 +447,11 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 1);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
@@ -461,11 +461,11 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 1);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingOneStream() {
@@ -474,11 +474,11 @@ void SimulcastTestFixtureImpl::TestPaddingOneStream() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
@@ -488,11 +488,11 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestSendAllStreams() {
@@ -501,11 +501,11 @@ void SimulcastTestFixtureImpl::TestSendAllStreams() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, 3);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestDisablingStreams() {
@@ -514,44 +514,44 @@ void SimulcastTestFixtureImpl::TestDisablingStreams() {
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
ExpectStreams(kVideoFrameDelta, 3);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
// We should only get two streams and padding for one.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
ExpectStreams(kVideoFrameDelta, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
// We should only get the first stream and padding for two.
SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
// We don't have enough bitrate for the thumbnail stream, but we should get
// it anyway with current configuration.
SetRates(kTargetBitrates[0] - 1, 30);
ExpectStreams(kVideoFrameDelta, 1);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
// We should only get two streams and padding for one.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 2);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
// We should get all three streams.
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 3);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestActiveStreams() {
@@ -628,7 +628,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
.Times(1)
.WillRepeatedly(Return(
EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
// Switch back.
DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
@@ -647,7 +647,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
.set_rotation(webrtc::kVideoRotation_0)
.set_timestamp_us(0)
.build());
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
@@ -675,7 +675,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -683,7 +683,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
// Next frame: #1.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -691,7 +691,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
// Next frame: #2.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -699,7 +699,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
// Next frame: #3.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -707,7 +707,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
// Next frame: #4.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -715,7 +715,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
// Next frame: #5.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -745,7 +745,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -753,7 +753,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
// Next frame: #1.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -761,7 +761,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
// Next frame: #2.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -769,7 +769,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
// Next frame: #3.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -777,7 +777,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
// Next frame: #4.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -785,7 +785,7 @@ void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
// Next frame: #5.
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, true, false, expected_layer_sync);
VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -819,7 +819,7 @@ void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
plane_offset[kVPlane] = kColorV;
CreateImage(input_buffer_, plane_offset);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
// Change color.
plane_offset[kYPlane] += 1;
@@ -827,7 +827,7 @@ void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
plane_offset[kVPlane] += 1;
CreateImage(input_buffer_, plane_offset);
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
EncodedImage encoded_frame;
// Only encoding one frame - so will be a key frame.
@@ -866,7 +866,7 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
return EncodedImageCallback::Result(
EncodedImageCallback::Result::OK, 0);
}));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
EXPECT_CALL(decoder_callback, Decoded(_, _, _))
.WillOnce(testing::Invoke([](VideoFrame& decodedImage,

View File

@@ -84,11 +84,9 @@ TEST(ObjCVideoEncoderFactoryTest, EncodeReturnsOKOnSuccess) {
.set_rotation(webrtc::kVideoRotation_0)
.set_timestamp_us(0)
.build();
webrtc::CodecSpecificInfo info;
info.codecType = webrtc::kVideoCodecH264;
std::vector<webrtc::FrameType> frame_types;
EXPECT_EQ(encoder->Encode(frame, &info, &frame_types), WEBRTC_VIDEO_CODEC_OK);
EXPECT_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_OK);
}
TEST(ObjCVideoEncoderFactoryTest, EncodeReturnsErrorOnFail) {
@@ -104,11 +102,9 @@ TEST(ObjCVideoEncoderFactoryTest, EncodeReturnsErrorOnFail) {
.set_rotation(webrtc::kVideoRotation_0)
.set_timestamp_us(0)
.build();
webrtc::CodecSpecificInfo info;
info.codecType = webrtc::kVideoCodecH264;
std::vector<webrtc::FrameType> frame_types;
EXPECT_EQ(encoder->Encode(frame, &info, &frame_types), WEBRTC_VIDEO_CODEC_ERROR);
EXPECT_EQ(encoder->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_ERROR);
}
TEST(ObjCVideoEncoderFactoryTest, ReleaseEncodeReturnsOKOnSuccess) {

View File

@@ -87,7 +87,6 @@ int32_t QualityAnalyzingVideoEncoder::Release() {
int32_t QualityAnalyzingVideoEncoder::Encode(
const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
{
rtc::CritScope crit(&lock_);
@@ -98,7 +97,7 @@ int32_t QualityAnalyzingVideoEncoder::Encode(
RTC_DCHECK_LT(timestamp_to_frame_id_list_.size(), kMaxFrameInPipelineCount);
}
analyzer_->OnFramePreEncode(frame);
int32_t result = delegate_->Encode(frame, codec_specific_info, frame_types);
int32_t result = delegate_->Encode(frame, frame_types);
if (result != WEBRTC_VIDEO_CODEC_OK) {
// If origin encoder failed, then cleanup data for this frame.
{

View File

@@ -68,7 +68,6 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder,
EncodedImageCallback* callback) override;
int32_t Release() override;
int32_t Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) override;
int32_t SetRates(uint32_t bitrate, uint32_t framerate) override;
int32_t SetRateAllocation(const VideoBitrateAllocation& allocation,

View File

@@ -62,9 +62,8 @@ class VideoEncoderProxyFactory final : public VideoEncoderFactory {
private:
int32_t Encode(const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) override {
return encoder_->Encode(input_image, codec_specific_info, frame_types);
return encoder_->Encode(input_image, frame_types);
}
int32_t InitEncode(const VideoCodec* config,
int32_t number_of_cores,

View File

@@ -142,12 +142,11 @@ class QualityTestVideoEncoder : public VideoEncoder,
}
int32_t Release() override { return encoder_->Release(); }
int32_t Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
if (analyzer_) {
analyzer_->PreEncodeOnFrame(frame);
}
return encoder_->Encode(frame, codec_specific_info, frame_types);
return encoder_->Encode(frame, frame_types);
}
int32_t SetRateAllocation(const VideoBitrateAllocation& allocation,
uint32_t framerate) override {

View File

@@ -1268,8 +1268,7 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
frame_encoder_timer_.OnEncodeStarted(out_frame.timestamp(),
out_frame.render_time_ms());
const int32_t encode_status =
encoder_->Encode(out_frame, nullptr, &next_frame_types_);
const int32_t encode_status = encoder_->Encode(out_frame, &next_frame_types_);
if (encode_status < 0) {
RTC_LOG(LS_ERROR) << "Failed to encode frame. Error code: "
@@ -1307,7 +1306,7 @@ void VideoStreamEncoder::SendKeyFrame() {
.set_rotation(kVideoRotation_0)
.set_timestamp_us(0)
.build(),
nullptr,
&next_frame_types_) == WEBRTC_VIDEO_CODEC_OK) {
// Try to remove just-performed keyframe request, if stream still exists.
next_frame_types_[0] = kVideoFrameDelta;