Remove unnecessary RTPFragmentationHeader from VideoProcessor callbacks.

This test is, and should remain, independent of RTP, so we don't need
the information provided in this struct.

BUG=webrtc:6634

Review-Url: https://codereview.webrtc.org/2995403002
Cr-Commit-Position: refs/heads/master@{#19443}
This commit is contained in:
brandtr 2017-08-22 03:33:11 -07:00 committed by Commit Bot
parent bdbc8895f3
commit 4553562fe3
2 changed files with 9 additions and 22 deletions

View File

@ -286,10 +286,8 @@ int VideoProcessor::NumberSpatialResizes() {
return num_spatial_resizes_;
}
void VideoProcessor::FrameEncoded(
webrtc::VideoCodecType codec,
const EncodedImage& encoded_image,
const webrtc::RTPFragmentationHeader* fragmentation) {
void VideoProcessor::FrameEncoded(webrtc::VideoCodecType codec,
const EncodedImage& encoded_image) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
// For the highest measurement accuracy of the encode time, the start/stop
@ -395,11 +393,6 @@ void VideoProcessor::FrameEncoded(
}
frame_info->manipulated_length = copied_image._length;
// Keep track of if frames are lost due to packet loss so we can tell
// this to the encoder (this is handled by the RTP logic in the full stack).
// TODO(kjellander): Pass fragmentation header to the decoder when
// CL 172001 has been submitted and PacketManipulator supports this.
// For the highest measurement accuracy of the decode time, the start/stop
// time recordings should wrap the Decode call as tightly as possible.
frame_info->decode_start_ns = rtc::TimeNanos();

View File

@ -208,14 +208,14 @@ class VideoProcessor {
RTC_CHECK(codec_specific_info);
if (task_queue_ && !task_queue_->IsCurrent()) {
task_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
new EncodeCallbackTask(video_processor_, encoded_image,
codec_specific_info, fragmentation)));
task_queue_->PostTask(
std::unique_ptr<rtc::QueuedTask>(new EncodeCallbackTask(
video_processor_, encoded_image, codec_specific_info)));
return Result(Result::OK, 0);
}
video_processor_->FrameEncoded(codec_specific_info->codecType,
encoded_image, fragmentation);
encoded_image);
return Result(Result::OK, 0);
}
@ -224,20 +224,17 @@ class VideoProcessor {
public:
EncodeCallbackTask(VideoProcessor* video_processor,
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation)
const webrtc::CodecSpecificInfo* codec_specific_info)
: video_processor_(video_processor),
buffer_(encoded_image._buffer, encoded_image._length),
encoded_image_(encoded_image),
codec_specific_info_(*codec_specific_info) {
encoded_image_._buffer = buffer_.data();
RTC_CHECK(fragmentation);
fragmentation_.CopyFrom(*fragmentation);
}
bool Run() override {
video_processor_->FrameEncoded(codec_specific_info_.codecType,
encoded_image_, &fragmentation_);
encoded_image_);
return true;
}
@ -246,7 +243,6 @@ class VideoProcessor {
rtc::Buffer buffer_;
webrtc::EncodedImage encoded_image_;
const webrtc::CodecSpecificInfo codec_specific_info_;
webrtc::RTPFragmentationHeader fragmentation_;
};
VideoProcessor* const video_processor_;
@ -267,7 +263,6 @@ class VideoProcessor {
[this, image]() { video_processor_->FrameDecoded(image); });
return 0;
}
video_processor_->FrameDecoded(image);
return 0;
}
@ -290,8 +285,7 @@ class VideoProcessor {
// Invoked by the callback adapter when a frame has completed encoding.
void FrameEncoded(webrtc::VideoCodecType codec,
const webrtc::EncodedImage& encodedImage,
const webrtc::RTPFragmentationHeader* fragmentation);
const webrtc::EncodedImage& encodedImage);
// Invoked by the callback adapter when a frame has completed decoding.
void FrameDecoded(const webrtc::VideoFrame& image);