Deprecate RTPFragmentationHeader argument to VideoDecoder::Decode

Intend to delete in a later CL.

Bug: webrtc:6471
Change-Id: Icf0fcd40e0d3287dc59b684fae6552b40b47204a
Reviewed-on: https://webrtc-review.googlesource.com/39511
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23162}
Parent: ea562b40f8
Commit: 8df3a388a3
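As context for the diff below: after this change, call sites simply drop the RTPFragmentationHeader argument when calling VideoDecoder::Decode. A minimal sketch of such a call site (not part of this CL; the free function and variable names are made up for illustration, and the include path is the usual one for this era of the WebRTC tree):

// Hypothetical call site, for illustration only.
#include "api/video_codecs/video_decoder.h"

void DecodeOneFrame(webrtc::VideoDecoder* decoder,
                    const webrtc::EncodedImage& encoded_image,
                    const webrtc::CodecSpecificInfo* codec_info) {
  // Deprecated overload: the fragmentation header was passed explicitly
  // (usually as nullptr):
  // decoder->Decode(encoded_image, /*missing_frames=*/false,
  //                 /*fragmentation=*/nullptr, codec_info,
  //                 /*render_time_ms=*/-1);

  // New overload: the same call without the fragmentation argument.
  decoder->Decode(encoded_image, /*missing_frames=*/false, codec_info,
                  /*render_time_ms=*/-1);
}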
@@ -41,4 +41,23 @@ const char* VideoDecoder::ImplementationName() const {
   return "unknown";
 }
 
+int32_t VideoDecoder::Decode(
+    const EncodedImage& input_image,
+    bool missing_frames,
+    const CodecSpecificInfo* codec_specific_info,
+    int64_t render_time_ms) {
+  return Decode(input_image, missing_frames, nullptr, codec_specific_info,
+                render_time_ms);
+}
+
+int32_t VideoDecoder::Decode(
+    const EncodedImage& input_image,
+    bool missing_frames,
+    const RTPFragmentationHeader* fragmentation,
+    const CodecSpecificInfo* codec_specific_info /* = NULL */,
+    int64_t render_time_ms /* = -1 */) {
+  return Decode(input_image, missing_frames, codec_specific_info,
+                render_time_ms);
+}
+
 }  // namespace webrtc
@@ -56,11 +56,20 @@ class VideoDecoder {
   virtual int32_t InitDecode(const VideoCodec* codec_settings,
                              int32_t number_of_cores) = 0;
 
+  virtual int32_t Decode(const EncodedImage& input_image,
+                         bool missing_frames,
+                         const CodecSpecificInfo* codec_specific_info,
+                         int64_t render_time_ms);
+
+  // TODO(nisse): Deprecated. Delete this method, and make the above pure
+  // virtual, after downstream projects are updated. The default implementations
+  // of this method and the above ensures that during the transition, subclasses
+  // can choose to implement one or the other.
   virtual int32_t Decode(const EncodedImage& input_image,
                          bool missing_frames,
                          const RTPFragmentationHeader* fragmentation,
                          const CodecSpecificInfo* codec_specific_info = NULL,
-                         int64_t render_time_ms = -1) = 0;
+                         int64_t render_time_ms = -1);
 
   virtual int32_t RegisterDecodeCompleteCallback(
       DecodedImageCallback* callback) = 0;
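The TODO comment above describes the transition mechanism: both Decode overloads now have default implementations that forward to each other, so a subclass may override either one. A minimal sketch of a downstream decoder that has already moved to the new four-argument signature (the class name, bodies, and the error-code include path are illustrative assumptions, not part of this CL):

#include "api/video_codecs/video_decoder.h"
#include "modules/video_coding/include/video_error_codes.h"  // assumed path for WEBRTC_VIDEO_CODEC_OK

// Hypothetical downstream decoder; only the Decode signature matters here.
class ExampleDecoder : public webrtc::VideoDecoder {
 public:
  int32_t InitDecode(const webrtc::VideoCodec* codec_settings,
                     int32_t number_of_cores) override {
    return WEBRTC_VIDEO_CODEC_OK;
  }

  // Overrides only the new overload; the deprecated five-argument overload's
  // default implementation forwards here, so callers that still pass an
  // RTPFragmentationHeader keep working during the transition.
  int32_t Decode(const webrtc::EncodedImage& input_image,
                 bool missing_frames,
                 const webrtc::CodecSpecificInfo* codec_specific_info,
                 int64_t render_time_ms) override {
    // Real decoding work would go here.
    return WEBRTC_VIDEO_CODEC_OK;
  }

  int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* callback) override {
    callback_ = callback;
    return WEBRTC_VIDEO_CODEC_OK;
  }

  int32_t Release() override { return WEBRTC_VIDEO_CODEC_OK; }

 private:
  webrtc::DecodedImageCallback* callback_ = nullptr;
};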
@@ -57,7 +57,6 @@ int32_t FakeWebRtcVideoDecoder::InitDecode(const webrtc::VideoCodec*, int32_t) {
 
 int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&,
                                        bool,
-                                       const webrtc::RTPFragmentationHeader*,
                                        const webrtc::CodecSpecificInfo*,
                                        int64_t) {
   num_frames_received_++;
@@ -40,7 +40,6 @@ class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder {
   virtual int32_t InitDecode(const webrtc::VideoCodec*, int32_t);
   virtual int32_t Decode(const webrtc::EncodedImage&,
                          bool,
-                         const webrtc::RTPFragmentationHeader*,
                          const webrtc::CodecSpecificInfo*,
                          int64_t);
   virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback*);
@@ -30,7 +30,6 @@ class ScopedVideoDecoder : public webrtc::VideoDecoder {
   int32_t Release() override;
   int32_t Decode(const webrtc::EncodedImage& input_image,
                  bool missing_frames,
-                 const webrtc::RTPFragmentationHeader* fragmentation,
                  const webrtc::CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override;
   bool PrefersLateDecoding() const override;
@@ -64,10 +63,9 @@ int32_t ScopedVideoDecoder::Release() {
 int32_t ScopedVideoDecoder::Decode(
     const webrtc::EncodedImage& input_image,
     bool missing_frames,
-    const webrtc::RTPFragmentationHeader* fragmentation,
     const webrtc::CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
-  return decoder_->Decode(input_image, missing_frames, fragmentation,
+  return decoder_->Decode(input_image, missing_frames,
                           codec_specific_info, render_time_ms);
 }
 
@@ -90,7 +90,6 @@ bool VideoDecoderSoftwareFallbackWrapper::InitFallbackDecoder() {
 int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
     const EncodedImage& input_image,
     bool missing_frames,
-    const RTPFragmentationHeader* fragmentation,
     const CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
   TRACE_EVENT0("webrtc", "VideoDecoderSoftwareFallbackWrapper::Decode");
@@ -99,7 +98,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
       return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
     case DecoderType::kHardware: {
       int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
-      ret = hw_decoder_->Decode(input_image, missing_frames, fragmentation,
+      ret = hw_decoder_->Decode(input_image, missing_frames,
                                 codec_specific_info, render_time_ms);
       if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
         return ret;
@@ -116,7 +115,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
     }
     case DecoderType::kFallback:
       return fallback_decoder_->Decode(input_image, missing_frames,
-                                       fragmentation, codec_specific_info,
+                                       codec_specific_info,
                                        render_time_ms);
     default:
       RTC_NOTREACHED();
@@ -33,7 +33,6 @@ class VideoDecoderSoftwareFallbackWrapper : public VideoDecoder {
 
   int32_t Decode(const EncodedImage& input_image,
                  bool missing_frames,
-                 const RTPFragmentationHeader* fragmentation,
                  const CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override;
 
@@ -34,7 +34,6 @@ class VideoDecoderSoftwareFallbackWrapperTest : public ::testing::Test {
 
     int32_t Decode(const EncodedImage& input_image,
                    bool missing_frames,
-                   const RTPFragmentationHeader* fragmentation,
                    const CodecSpecificInfo* codec_specific_info,
                    int64_t render_time_ms) override {
       ++decode_count_;
@@ -76,7 +75,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, InitializesDecoder) {
 
   EncodedImage encoded_image;
   encoded_image._frameType = kVideoFrameKey;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->init_decode_count_)
       << "Initialized decoder should not be reinitialized.";
   EXPECT_EQ(1, fake_decoder_->decode_count_);
@@ -92,7 +91,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
 
   EncodedImage encoded_image;
   encoded_image._frameType = kVideoFrameKey;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->init_decode_count_)
       << "Should not have attempted reinitializing the fallback decoder on "
          "keyframe.";
@@ -108,12 +107,12 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, IsSoftwareFallbackSticky) {
 
   fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
   EncodedImage encoded_image;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->decode_count_);
 
   // Software fallback should be sticky, fake_decoder_ shouldn't be used.
   encoded_image._frameType = kVideoFrameKey;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->decode_count_)
       << "Decoder shouldn't be used after failure.";
 
@@ -128,10 +127,10 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, DoesNotFallbackOnEveryError) {
   EncodedImage encoded_image;
   EXPECT_EQ(
       fake_decoder_->decode_return_code_,
-      fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1));
+      fallback_wrapper_.Decode(encoded_image, false, nullptr, -1));
   EXPECT_EQ(1, fake_decoder_->decode_count_);
 
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(2, fake_decoder_->decode_count_)
       << "Decoder should be active even though previous decode failed.";
 }
@@ -142,14 +141,14 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, UsesHwDecoderAfterReinit) {
 
   fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
   EncodedImage encoded_image;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(1, fake_decoder_->decode_count_);
 
   fallback_wrapper_.Release();
   fallback_wrapper_.InitDecode(&codec, 2);
 
   fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(2, fake_decoder_->decode_count_)
       << "Should not be using fallback after reinit.";
 }
@@ -163,7 +162,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest, ForwardsReleaseCall) {
   fallback_wrapper_.InitDecode(&codec, 2);
   fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
   EncodedImage encoded_image;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   EXPECT_EQ(2, fake_decoder_->release_count_)
       << "Decoder should be released during fallback.";
   fallback_wrapper_.Release();
@@ -201,7 +200,7 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
 
   fake_decoder_->decode_return_code_ = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
   EncodedImage encoded_image;
-  fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
+  fallback_wrapper_.Decode(encoded_image, false, nullptr, -1);
   // Hard coded expected value since libvpx is the software implementation name
   // for VP8. Change accordingly if the underlying implementation does.
   EXPECT_STREQ("libvpx (fallback from: fake-decoder)",
@@ -140,7 +140,6 @@ class NullVideoDecoder : public webrtc::VideoDecoder {
 
   int32_t Decode(const webrtc::EncodedImage& input_image,
                  bool missing_frames,
-                 const webrtc::RTPFragmentationHeader* fragmentation,
                  const webrtc::CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override {
     RTC_LOG(LS_ERROR) << "The NullVideoDecoder doesn't support decoding.";
@@ -279,7 +279,6 @@ int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
 
 int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
                                 bool /*missing_frames*/,
-                                const RTPFragmentationHeader* /*fragmentation*/,
                                 const CodecSpecificInfo* codec_specific_info,
                                 int64_t /*render_time_ms*/) {
   if (!IsInitialized()) {
@@ -49,7 +49,6 @@ class H264DecoderImpl : public H264Decoder {
   // |missing_frames|, |fragmentation| and |render_time_ms| are ignored.
   int32_t Decode(const EncodedImage& input_image,
                  bool /*missing_frames*/,
-                 const RTPFragmentationHeader* /*fragmentation*/,
                  const CodecSpecificInfo* codec_specific_info = nullptr,
                  int64_t render_time_ms = -1) override;
 
@@ -152,7 +152,6 @@ int I420Decoder::InitDecode(const VideoCodec* codecSettings,
 
 int I420Decoder::Decode(const EncodedImage& inputImage,
                         bool /*missingFrames*/,
-                        const RTPFragmentationHeader* /*fragmentation*/,
                         const CodecSpecificInfo* /*codecSpecificInfo*/,
                         int64_t /*renderTimeMs*/) {
   if (inputImage._buffer == NULL) {
@@ -108,7 +108,6 @@ class I420Decoder : public VideoDecoder {
   // <0 - Error
   int Decode(const EncodedImage& inputImage,
              bool missingFrames,
-             const RTPFragmentationHeader* /*fragmentation*/,
             const CodecSpecificInfo* /*codecSpecificInfo*/,
              int64_t /*renderTimeMs*/) override;
 
@@ -34,7 +34,6 @@ class MultiplexDecoderAdapter : public VideoDecoder {
                      int32_t number_of_cores) override;
   int32_t Decode(const EncodedImage& input_image,
                  bool missing_frames,
-                 const RTPFragmentationHeader* fragmentation,
                  const CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override;
   int32_t RegisterDecodeCompleteCallback(
@@ -111,7 +111,6 @@ int32_t MultiplexDecoderAdapter::InitDecode(const VideoCodec* codec_settings,
 int32_t MultiplexDecoderAdapter::Decode(
     const EncodedImage& input_image,
     bool missing_frames,
-    const RTPFragmentationHeader* /*fragmentation*/,
     const CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
   const MultiplexImage& image =
@@ -127,7 +126,7 @@ int32_t MultiplexDecoderAdapter::Decode(
   int32_t rv = 0;
   for (size_t i = 0; i < image.image_components.size(); i++) {
     rv = decoders_[image.image_components[i].component_index]->Decode(
-        image.image_components[i].encoded_image, missing_frames, nullptr,
+        image.image_components[i].encoded_image, missing_frames,
         nullptr, render_time_ms);
     if (rv != WEBRTC_VIDEO_CODEC_OK)
       return rv;
@@ -130,7 +130,7 @@ TEST_F(TestMultiplexAdapter, EncodeDecodeI420Frame) {
 
   EXPECT_EQ(
       WEBRTC_VIDEO_CODEC_OK,
-      decoder_->Decode(encoded_frame, false, nullptr, &codec_specific_info));
+      decoder_->Decode(encoded_frame, false, &codec_specific_info, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   rtc::Optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -148,7 +148,7 @@ TEST_F(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
   EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
 
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr));
   std::unique_ptr<VideoFrame> decoded_frame;
   rtc::Optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -142,7 +142,6 @@ int LibvpxVp8Decoder::InitDecode(const VideoCodec* inst, int number_of_cores) {
 
 int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
                              bool missing_frames,
-                             const RTPFragmentationHeader* fragmentation,
                              const CodecSpecificInfo* codec_specific_info,
                              int64_t /*render_time_ms*/) {
   if (!inited_) {
@@ -34,7 +34,6 @@ class LibvpxVp8Decoder : public VP8Decoder {
 
   int Decode(const EncodedImage& input_image,
              bool missing_frames,
-             const RTPFragmentationHeader* fragmentation,
             const CodecSpecificInfo* codec_specific_info,
              int64_t /*render_time_ms*/) override;
 
@@ -160,7 +160,7 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   // First frame should be a key frame.
   encoded_frame._frameType = kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   rtc::Optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -246,7 +246,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   encoded_frame._frameType = kVideoFrameKey;
   encoded_frame.ntp_time_ms_ = kTestNtpTimeMs;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr, -1));
 
   std::unique_ptr<VideoFrame> decoded_frame;
   rtc::Optional<uint8_t> decoded_qp;
@@ -272,16 +272,16 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
   // Setting complete to false -> should return an error.
   encoded_frame._completeFrame = false;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_frame, false, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr, -1));
   // Setting complete back to true. Forcing a delta frame.
   encoded_frame._frameType = kVideoFrameDelta;
   encoded_frame._completeFrame = true;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
-            decoder_->Decode(encoded_frame, false, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr, -1));
   // Now setting a key frame.
   encoded_frame._frameType = kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
-            decoder_->Decode(encoded_frame, false, nullptr));
+            decoder_->Decode(encoded_frame, false, nullptr, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   rtc::Optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -991,7 +991,6 @@ int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
 
 int VP9DecoderImpl::Decode(const EncodedImage& input_image,
                            bool missing_frames,
-                           const RTPFragmentationHeader* fragmentation,
                            const CodecSpecificInfo* codec_specific_info,
                            int64_t /*render_time_ms*/) {
   if (!inited_) {
@@ -145,7 +145,6 @@ class VP9DecoderImpl : public VP9Decoder {
 
   int Decode(const EncodedImage& input_image,
              bool missing_frames,
-             const RTPFragmentationHeader* fragmentation,
              const CodecSpecificInfo* codec_specific_info,
              int64_t /*render_time_ms*/) override;
 
@@ -229,9 +229,7 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
 
   _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
-  const RTPFragmentationHeader dummy_header;
   int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
-                                 &dummy_header,
                                  frame.CodecSpecific(), frame.RenderTimeMs());
 
   _callback->OnDecoderImplementationName(decoder_->ImplementationName());
@@ -69,10 +69,9 @@ class MockVideoDecoder : public VideoDecoder {
  public:
   MOCK_METHOD2(InitDecode,
                int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
-  MOCK_METHOD5(Decode,
+  MOCK_METHOD4(Decode,
                int32_t(const EncodedImage& inputImage,
                        bool missingFrames,
-                       const RTPFragmentationHeader* fragmentation,
                        const CodecSpecificInfo* codecSpecificInfo,
                        int64_t renderTimeMs));
   MOCK_METHOD1(RegisterDecodeCompleteCallback,
@@ -65,7 +65,7 @@ class TestVideoReceiver : public ::testing::Test {
       ++header->header.sequenceNumber;
     }
     receiver_->Process();
-    EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(0);
+    EXPECT_CALL(decoder_, Decode(_, _, _, _)).Times(0);
     EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_->Decode(100));
   }
 
@@ -77,7 +77,7 @@ class TestVideoReceiver : public ::testing::Test {
       ++header->header.sequenceNumber;
       EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
       receiver_->Process();;
-      EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(1);
+      EXPECT_CALL(decoder_, Decode(_, _, _, _)).Times(1);
       EXPECT_EQ(0, receiver_->Decode(100));
     }
 
@@ -67,7 +67,6 @@ class MediaCodecVideoDecoder : public VideoDecoder, public rtc::MessageHandler {
 
   int32_t Decode(
       const EncodedImage& inputImage, bool missingFrames,
-      const RTPFragmentationHeader* fragmentation,
       const CodecSpecificInfo* codecSpecificInfo = NULL,
       int64_t renderTimeMs = -1) override;
 
@@ -359,7 +358,6 @@ int32_t MediaCodecVideoDecoder::ProcessHWErrorOnCodecThread() {
 int32_t MediaCodecVideoDecoder::Decode(
     const EncodedImage& inputImage,
     bool missingFrames,
-    const RTPFragmentationHeader* fragmentation,
     const CodecSpecificInfo* codecSpecificInfo,
     int64_t renderTimeMs) {
   if (sw_fallback_required_) {
@@ -86,7 +86,6 @@ int32_t VideoDecoderWrapper::InitDecodeInternal(JNIEnv* jni) {
 int32_t VideoDecoderWrapper::Decode(
     const EncodedImage& image_param,
     bool missing_frames,
-    const RTPFragmentationHeader* fragmentation,
     const CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
   RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
@@ -35,7 +35,6 @@ class VideoDecoderWrapper : public VideoDecoder {
 
   int32_t Decode(const EncodedImage& input_image,
                  bool missing_frames,
-                 const RTPFragmentationHeader* fragmentation,
                  const CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override;
 
@@ -50,7 +50,6 @@ class ObjCVideoDecoder : public VideoDecoder {
 
   int32_t Decode(const EncodedImage &input_image,
                  bool missing_frames,
-                 const RTPFragmentationHeader * /* fragmentation */,
                  const CodecSpecificInfo *codec_specific_info = NULL,
                  int64_t render_time_ms = -1) {
     RTCEncodedImage *encodedImage =
@@ -74,7 +74,7 @@ TEST(ObjCVideoDecoderFactoryTest, DecodeReturnsOKOnSuccess) {
   webrtc::CodecSpecificInfo info;
   info.codecType = webrtc::kVideoCodecH264;
 
-  EXPECT_EQ(decoder->Decode(encoded_image, false, nullptr, &info, 0), WEBRTC_VIDEO_CODEC_OK);
+  EXPECT_EQ(decoder->Decode(encoded_image, false, &info, 0), WEBRTC_VIDEO_CODEC_OK);
 }
 
 TEST(ObjCVideoDecoderFactoryTest, DecodeReturnsErrorOnFail) {
@@ -84,7 +84,7 @@ TEST(ObjCVideoDecoderFactoryTest, DecodeReturnsErrorOnFail) {
   webrtc::CodecSpecificInfo info;
   info.codecType = webrtc::kVideoCodecH264;
 
-  EXPECT_EQ(decoder->Decode(encoded_image, false, nullptr, &info, 0), WEBRTC_VIDEO_CODEC_ERROR);
+  EXPECT_EQ(decoder->Decode(encoded_image, false, &info, 0), WEBRTC_VIDEO_CODEC_ERROR);
 }
 
 TEST(ObjCVideoDecoderFactoryTest, ReleaseDecodeReturnsOKOnSuccess) {
@@ -30,7 +30,6 @@ int32_t FakeDecoder::InitDecode(const VideoCodec* config,
 
 int32_t FakeDecoder::Decode(const EncodedImage& input,
                             bool missing_frames,
-                            const RTPFragmentationHeader* fragmentation,
                             const CodecSpecificInfo* codec_specific_info,
                             int64_t render_time_ms) {
   if (input._encodedWidth > 0 && input._encodedHeight > 0) {
@@ -66,7 +65,6 @@ const char* FakeDecoder::ImplementationName() const {
 
 int32_t FakeH264Decoder::Decode(const EncodedImage& input,
                                 bool missing_frames,
-                                const RTPFragmentationHeader* fragmentation,
                                 const CodecSpecificInfo* codec_specific_info,
                                 int64_t render_time_ms) {
   uint8_t value = 0;
@@ -85,7 +83,6 @@ int32_t FakeH264Decoder::Decode(const EncodedImage& input,
   }
   return FakeDecoder::Decode(input,
                              missing_frames,
-                             fragmentation,
                              codec_specific_info,
                              render_time_ms);
 }
@@ -29,7 +29,6 @@ class FakeDecoder : public VideoDecoder {
 
   int32_t Decode(const EncodedImage& input,
                  bool missing_frames,
-                 const RTPFragmentationHeader* fragmentation,
                  const CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override;
 
@@ -54,7 +53,6 @@ class FakeH264Decoder : public FakeDecoder {
 
   int32_t Decode(const EncodedImage& input,
                  bool missing_frames,
-                 const RTPFragmentationHeader* fragmentation,
                  const CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override;
 };
@@ -65,7 +63,6 @@ class FakeNullDecoder : public FakeDecoder {
 
   int32_t Decode(const EncodedImage& input,
                  bool missing_frames,
-                 const RTPFragmentationHeader* fragmentation,
                  const CodecSpecificInfo* codec_specific_info,
                  int64_t render_time_ms) override {
     return 0;
@@ -50,10 +50,9 @@ class MockVideoDecoder : public VideoDecoder {
  public:
   MOCK_METHOD2(InitDecode,
                int32_t(const VideoCodec* config, int32_t number_of_cores));
-  MOCK_METHOD5(Decode,
+  MOCK_METHOD4(Decode,
                int32_t(const EncodedImage& input,
                        bool missing_frames,
-                       const RTPFragmentationHeader* fragmentation,
                        const CodecSpecificInfo* codec_specific_info,
                        int64_t render_time_ms));
   MOCK_METHOD1(RegisterDecodeCompleteCallback,
@@ -128,7 +127,7 @@ TEST_F(VideoReceiveStreamTest, CreateFrameFromH264FmtpSpropAndIdr) {
   }));
   EXPECT_CALL(mock_h264_video_decoder_, RegisterDecodeCompleteCallback(_));
   video_receive_stream_->Start();
-  EXPECT_CALL(mock_h264_video_decoder_, Decode(_, false, _, _, _));
+  EXPECT_CALL(mock_h264_video_decoder_, Decode(_, false, _, _));
   RtpPacketReceived parsed_packet;
   ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
   rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);
@@ -206,7 +206,6 @@ VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeNextFrame(
   int32_t decode_result =
       decoder->Decode(frame->EncodedImage(),
                       false,    // missing_frame
-                      nullptr,  // rtp fragmentation header
                       nullptr,  // codec specific info
                       frame->RenderTimeMs());
 