Add accessor methods for RTP timestamp of EncodedImage.

The intention is to make the member private, but downstream callers
must first be updated to use the accessor methods.
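
The pattern applied throughout the diff below is, roughly (illustrative
caller code, not taken verbatim from any one file):

  // Before: direct access to the public member.
  encoded_image._timeStamp = input_frame.timestamp();
  uint32_t ts = encoded_image._timeStamp;

  // After: go through the accessors, so that _timeStamp can later be
  // made private.
  encoded_image.SetTimestamp(input_frame.timestamp());
  uint32_t ts = encoded_image.Timestamp();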

Bug: webrtc:9378
Change-Id: I3495bd8d545b7234fbea10abfd14f082caa420b6
Reviewed-on: https://webrtc-review.googlesource.com/82160
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Niels Möller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24352}
Niels Möller 2018-08-16 10:24:12 +02:00 committed by Commit Bot
parent bcdf5f1a94
commit 2377588c82
52 changed files with 163 additions and 166 deletions

View File

@ -17,13 +17,5 @@ bool EncodedFrame::delayed_by_retransmission() const {
return 0;
}
uint32_t EncodedFrame::Timestamp() const {
return timestamp;
}
void EncodedFrame::SetTimestamp(uint32_t rtp_timestamp) {
timestamp = rtp_timestamp;
}
} // namespace video_coding
} // namespace webrtc

View File

@ -58,10 +58,6 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
virtual bool GetBitstream(uint8_t* destination) const = 0;
// The capture timestamp of this frame, using the 90 kHz RTP clock.
virtual uint32_t Timestamp() const;
virtual void SetTimestamp(uint32_t rtp_timestamp);
// When this frame was received.
virtual int64_t ReceivedTime() const = 0;
@ -78,7 +74,6 @@ class EncodedFrame : public webrtc::VCMEncodedFrame {
bool is_keyframe() const { return num_references == 0; }
VideoLayerFrameId id;
uint32_t timestamp = 0;
// TODO(philipel): Add simple modify/access functions to prevent adding too
// many |references|.

View File

@ -331,7 +331,7 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
}
bool send_result = rtp_modules_[stream_index]->SendOutgoingData(
encoded_image._frameType, rtp_config_.payload_type,
encoded_image._timeStamp, encoded_image.capture_time_ms_,
encoded_image.Timestamp(), encoded_image.capture_time_ms_,
encoded_image._buffer, encoded_image._length, fragmentation,
&rtp_video_header, &frame_id);
if (!send_result)

View File

@ -140,7 +140,7 @@ class RtpVideoSenderTestFixture {
TEST(RtpVideoSenderTest, SendOnOneModule) {
uint8_t payload = 'a';
EncodedImage encoded_image;
encoded_image._timeStamp = 1;
encoded_image.SetTimestamp(1);
encoded_image.capture_time_ms_ = 2;
encoded_image._frameType = kVideoFrameKey;
encoded_image._buffer = &payload;
@ -170,7 +170,7 @@ TEST(RtpVideoSenderTest, SendOnOneModule) {
TEST(RtpVideoSenderTest, SendSimulcastSetActive) {
uint8_t payload = 'a';
EncodedImage encoded_image;
encoded_image._timeStamp = 1;
encoded_image.SetTimestamp(1);
encoded_image.capture_time_ms_ = 2;
encoded_image._frameType = kVideoFrameKey;
encoded_image._buffer = &payload;
@ -217,7 +217,7 @@ TEST(RtpVideoSenderTest, SendSimulcastSetActive) {
TEST(RtpVideoSenderTest, SendSimulcastSetActiveModules) {
uint8_t payload = 'a';
EncodedImage encoded_image;
encoded_image._timeStamp = 1;
encoded_image.SetTimestamp(1);
encoded_image.capture_time_ms_ = 2;
encoded_image._frameType = kVideoFrameKey;
encoded_image._buffer = &payload;

View File

@ -37,6 +37,14 @@ class EncodedImage {
EncodedImage(const EncodedImage&);
EncodedImage(uint8_t* buffer, size_t length, size_t size);
// TODO(nisse): Change style to timestamp(), set_timestamp(), for consistency
// with the VideoFrame class.
// Set frame timestamp (90kHz).
void SetTimestamp(uint32_t timestamp) { _timeStamp = timestamp; }
// Get frame timestamp (90kHz).
uint32_t Timestamp() const { return _timeStamp; }
void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);
absl::optional<int> SpatialIndex() const {
@ -52,6 +60,8 @@ class EncodedImage {
uint32_t _encodedWidth = 0;
uint32_t _encodedHeight = 0;
// TODO(nisse): Make private, once users have been updated
// to use accessor methods.
uint32_t _timeStamp = 0;
// NTP time of the capture time in local timebase in milliseconds.
int64_t ntp_time_ms_ = 0;
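
A minimal usage sketch of the new accessors (hypothetical caller code; the
timestamp is on the 90 kHz RTP clock):

  webrtc::EncodedImage image;
  image.SetTimestamp(3000);  // 3000 ticks = 33.3 ms at 90 kHz.
  uint32_t rtp_timestamp = image.Timestamp();  // Reads back 3000.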

View File

@ -343,7 +343,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
last_encoded_image_simulcast_index_ =
codec_specific_info->codecSpecific.VP8.simulcastIdx;
}
return Result(Result::OK, encoded_image._timeStamp);
return Result(Result::OK, encoded_image.Timestamp());
}
bool GetLastEncodedImageInfo(int* out_width,

View File

@ -304,7 +304,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
VideoFrame::Builder()
.set_video_frame_buffer(input_frame->video_frame_buffer())
.set_timestamp_us(input_frame->timestamp_us())
.set_timestamp_rtp(input_image._timeStamp)
.set_timestamp_rtp(input_image.Timestamp())
.set_rotation(input_frame->rotation())
.set_color_space(color_space)
.build();

View File

@ -496,7 +496,7 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
encoded_images_[i]._encodedWidth = configurations_[i].width;
encoded_images_[i]._encodedHeight = configurations_[i].height;
encoded_images_[i]._timeStamp = input_frame.timestamp();
encoded_images_[i].SetTimestamp(input_frame.timestamp());
encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
encoded_images_[i].rotation_ = input_frame.rotation();

View File

@ -84,7 +84,7 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
}
_encodedImage._frameType = kVideoFrameKey;
_encodedImage._timeStamp = inputImage.timestamp();
_encodedImage.SetTimestamp(inputImage.timestamp());
_encodedImage._encodedHeight = inputImage.height();
_encodedImage._encodedWidth = inputImage.width();
@ -200,7 +200,7 @@ int I420Decoder::Decode(const EncodedImage& inputImage,
return WEBRTC_VIDEO_CODEC_MEMORY;
}
VideoFrame decoded_image(frame_buffer, inputImage._timeStamp, 0,
VideoFrame decoded_image(frame_buffer, inputImage.Timestamp(), 0,
webrtc::kVideoRotation_0);
_decodeCompleteCallback->Decoded(decoded_image);
return WEBRTC_VIDEO_CODEC_OK;

View File

@ -139,10 +139,10 @@ int32_t MultiplexDecoderAdapter::Decode(
}
if (image.component_count == 1) {
RTC_DCHECK(decoded_data_.find(input_image._timeStamp) ==
RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==
decoded_data_.end());
decoded_data_.emplace(std::piecewise_construct,
std::forward_as_tuple(input_image._timeStamp),
std::forward_as_tuple(input_image.Timestamp()),
std::forward_as_tuple(kAXXStream));
}
int32_t rv = 0;

View File

@ -258,7 +258,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
image_component.codec_type = frame_headers[i].codec_type;
EncodedImage encoded_image = combined_image;
encoded_image._timeStamp = combined_image._timeStamp;
encoded_image.SetTimestamp(combined_image.Timestamp());
encoded_image._frameType = frame_headers[i].frame_type;
encoded_image._size =
static_cast<size_t>(frame_headers[i].bitstream_length);

View File

@ -258,7 +258,8 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
encodedImage._length);
rtc::CritScope cs(&crit_);
const auto& stashed_image_itr = stashed_images_.find(encodedImage._timeStamp);
const auto& stashed_image_itr =
stashed_images_.find(encodedImage.Timestamp());
const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
RTC_DCHECK(stashed_image_itr != stashed_images_.end());
MultiplexImage& stashed_image = stashed_image_itr->second;

View File

@ -352,7 +352,7 @@ void VideoProcessor::FrameEncoded(
GetLayerIndices(codec_specific, &spatial_idx, &temporal_idx);
FrameStatistics* frame_stat =
stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
const size_t frame_number = frame_stat->frame_number;
// Ensure that the encode order is monotonically increasing, within this
@ -428,7 +428,7 @@ void VideoProcessor::FrameEncoded(
if (!layer_dropped) {
base_image = &merged_encoded_frames_[i];
base_stat =
stats_->GetFrameWithTimestamp(encoded_image._timeStamp, i);
stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), i);
} else if (base_image && !base_stat->non_ref_for_inter_layer_pred) {
DecodeFrame(*base_image, i);
}
@ -526,7 +526,7 @@ void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
size_t spatial_idx) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
FrameStatistics* frame_stat =
stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
frame_stat->decode_start_ns = rtc::TimeNanos();
frame_stat->decode_return_code =
@ -551,7 +551,7 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
for (int base_idx = static_cast<int>(spatial_idx) - 1; base_idx >= 0;
--base_idx) {
EncodedImage lower_layer = merged_encoded_frames_.at(base_idx);
if (lower_layer._timeStamp == encoded_image._timeStamp) {
if (lower_layer.Timestamp() == encoded_image.Timestamp()) {
base_image = lower_layer;
break;
}

View File

@ -254,7 +254,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
vpx_codec_err_t vpx_ret =
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
if (ret != 0) {
// Reset to avoid requesting key frames too often.
if (ret < 0 && propagation_cnt_ > 0)

View File

@ -881,7 +881,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(
break;
}
}
encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
encoded_images_[encoder_idx].capture_time_ms_ =
input_image.render_time_ms();
encoded_images_[encoder_idx].rotation_ = input_image.rotation();

View File

@ -136,7 +136,7 @@ TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
CodecSpecificInfo codec_specific_info;
EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
EXPECT_EQ(kInitialTimestampRtp, encoded_frame._timeStamp);
EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));

View File

@ -1009,7 +1009,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
}
TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
encoded_image_._timeStamp = input_image_->timestamp();
encoded_image_.SetTimestamp(input_image_->timestamp());
encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
encoded_image_.rotation_ = input_image_->rotation();
encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
@ -1046,9 +1046,9 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
if (end_of_picture) {
const uint32_t timestamp_ms =
1000 * encoded_image_._timeStamp / kVideoPayloadTypeFrequency;
1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
output_framerate_.Update(1, timestamp_ms);
last_encoded_frame_rtp_timestamp_ = encoded_image_._timeStamp;
last_encoded_frame_rtp_timestamp_ = encoded_image_.Timestamp();
}
}
}
@ -1190,7 +1190,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
int ret =
ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
if (ret != 0) {
return ret;
}

View File

@ -58,7 +58,7 @@ bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
assert(frame != NULL);
if (in_initial_state_)
return false;
return !IsNewerTimestamp(frame->TimeStamp(), time_stamp_);
return !IsNewerTimestamp(frame->Timestamp(), time_stamp_);
}
bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
@ -73,7 +73,7 @@ void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
if (!UsingFlexibleMode(frame))
UpdateSyncState(frame);
sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
time_stamp_ = frame->TimeStamp();
time_stamp_ = frame->Timestamp();
picture_id_ = frame->PictureId();
temporal_id_ = frame->TemporalId();
tl0_pic_id_ = frame->Tl0PicId();
@ -143,7 +143,7 @@ bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
// Continuous empty packets or continuous frames can be dropped if we
// advance the sequence number.
sequence_num_ = frame->GetHighSeqNum();
time_stamp_ = frame->TimeStamp();
time_stamp_ = frame->Timestamp();
return true;
}
return false;

View File

@ -35,8 +35,8 @@ void VCMEncodedFrame::Free() {
}
void VCMEncodedFrame::Reset() {
SetTimestamp(0);
_renderTimeMs = -1;
_timeStamp = 0;
_payloadType = 0;
_frameType = kVideoFrameDelta;
_encodedWidth = 0;

View File

@ -64,10 +64,12 @@ class VCMEncodedFrame : protected EncodedImage {
* Get frame length
*/
size_t Length() const { return _length; }
/**
* Get frame timestamp (90kHz)
* Frame RTP timestamp (90kHz)
*/
uint32_t TimeStamp() const { return _timeStamp; }
using EncodedImage::Timestamp;
using EncodedImage::SetTimestamp;
/**
* Get render time in milliseconds
*/
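
The using-declarations above re-export the base-class accessors, which the
protected inheritance (class VCMEncodedFrame : protected EncodedImage) would
otherwise hide, replacing the old TimeStamp() wrapper. A minimal sketch of
the pattern (hypothetical names):

  class Base {
   public:
    uint32_t Timestamp() const { return ts_; }
   private:
    uint32_t ts_ = 0;
  };

  class Derived : protected Base {
   public:
    using Base::Timestamp;  // Make the inherited accessor public again.
  };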

View File

@ -87,7 +87,7 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
if (kStateEmpty == _state) {
// First packet (empty and/or media) inserted into this frame.
// store some info and set some initial values.
_timeStamp = packet.timestamp;
SetTimestamp(packet.timestamp);
// We only take the ntp timestamp of the first packet of a frame.
ntp_time_ms_ = packet.ntp_time_ms_;
_codec = packet.codec;
@ -213,7 +213,6 @@ int VCMFrameBuffer::NumPackets() const {
void VCMFrameBuffer::Reset() {
TRACE_EVENT0("webrtc", "VCMFrameBuffer::Reset");
_length = 0;
_timeStamp = 0;
_sessionInfo.Reset();
_payloadType = 0;
_nackCount = 0;

View File

@ -117,7 +117,8 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
next_frame_it_ = frame_it;
if (frame->RenderTime() == -1)
frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
frame->SetRenderTime(
timing_->RenderTimeMs(frame->Timestamp(), now_ms));
wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms);
// This will cause the frame buffer to prefer high framerate rather
@ -146,7 +147,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
if (!frame->delayed_by_retransmission()) {
int64_t frame_delay;
if (inter_frame_delay_.CalculateDelay(frame->timestamp, &frame_delay,
if (inter_frame_delay_.CalculateDelay(frame->Timestamp(), &frame_delay,
frame->ReceivedTime())) {
jitter_estimator_->UpdateEstimate(frame_delay, frame->size());
}
@ -163,7 +164,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
if (HasBadRenderTiming(*frame, now_ms)) {
jitter_estimator_->Reset();
timing_->Reset();
frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
frame->SetRenderTime(timing_->RenderTimeMs(frame->Timestamp(), now_ms));
}
UpdateJitterDelay();
@ -177,17 +178,17 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
const VideoLayerFrameId& frame_key = next_frame_it_->first;
const bool frame_is_higher_spatial_layer_of_last_decoded_frame =
last_decoded_frame_timestamp_ == frame->timestamp &&
last_decoded_frame_timestamp_ == frame->Timestamp() &&
last_decoded_frame_key.picture_id == frame_key.picture_id &&
last_decoded_frame_key.spatial_layer < frame_key.spatial_layer;
if (AheadOrAt(last_decoded_frame_timestamp_, frame->timestamp) &&
if (AheadOrAt(last_decoded_frame_timestamp_, frame->Timestamp()) &&
!frame_is_higher_spatial_layer_of_last_decoded_frame) {
// TODO(brandtr): Consider clearing the entire buffer when we hit
// these conditions.
RTC_LOG(LS_WARNING)
<< "Frame with (timestamp:picture_id:spatial_id) ("
<< frame->timestamp << ":" << frame->id.picture_id << ":"
<< frame->Timestamp() << ":" << frame->id.picture_id << ":"
<< static_cast<int>(frame->id.spatial_layer) << ")"
<< " sent to decoder after frame with"
<< " (timestamp:picture_id:spatial_id) ("
@ -198,7 +199,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
}
AdvanceLastDecodedFrame(next_frame_it_);
last_decoded_frame_timestamp_ = frame->timestamp;
last_decoded_frame_timestamp_ = frame->Timestamp();
*frame_out = std::move(frame);
return kFrameFound;
}
@ -297,7 +298,7 @@ void FrameBuffer::UpdatePlayoutDelays(const EncodedFrame& frame) {
timing_->set_max_playout_delay(playout_delay.max_ms);
if (!frame.delayed_by_retransmission())
timing_->IncomingTimestamp(frame.timestamp, frame.ReceivedTime());
timing_->IncomingTimestamp(frame.Timestamp(), frame.ReceivedTime());
}
int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
@ -343,7 +344,7 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
if (last_decoded_frame_it_ != frames_.end() &&
id <= last_decoded_frame_it_->first) {
if (AheadOf(frame->timestamp, last_decoded_frame_timestamp_) &&
if (AheadOf(frame->Timestamp(), last_decoded_frame_timestamp_) &&
frame->is_keyframe()) {
// If this frame has a newer timestamp but an earlier picture id then we
// assume there has been a jump in the picture id due to some encoder

View File

@ -90,8 +90,6 @@ class FrameObjectFake : public EncodedFrame {
public:
bool GetBitstream(uint8_t* destination) const override { return true; }
uint32_t Timestamp() const override { return timestamp; }
int64_t ReceivedTime() const override { return 0; }
int64_t RenderTime() const override { return _renderTimeMs; }
@ -165,7 +163,7 @@ class TestFrameBuffer2 : public ::testing::Test {
std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
frame->id.picture_id = picture_id;
frame->id.spatial_layer = spatial_layer;
frame->timestamp = ts_ms * 90;
frame->SetTimestamp(ts_ms * 90);
frame->num_references = references.size();
frame->inter_layer_predicted = inter_layer_predicted;
for (size_t r = 0; r < references.size(); ++r)
@ -520,7 +518,7 @@ TEST_F(TestFrameBuffer2, StatsCallback) {
frame->SetSize(kFrameSize);
frame->id.picture_id = pid;
frame->id.spatial_layer = 0;
frame->timestamp = ts;
frame->SetTimestamp(ts);
frame->num_references = 0;
frame->inter_layer_predicted = false;

View File

@ -26,7 +26,6 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
: packet_buffer_(packet_buffer),
first_seq_num_(first_seq_num),
last_seq_num_(last_seq_num),
timestamp_(0),
received_time_(received_time),
times_nacked_(times_nacked) {
VCMPacket* first_packet = packet_buffer_->GetPacket(first_seq_num);
@ -41,7 +40,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
CopyCodecSpecific(&first_packet->video_header);
_completeFrame = true;
_payloadType = first_packet->payloadType;
_timeStamp = first_packet->timestamp;
SetTimestamp(first_packet->timestamp);
ntp_time_ms_ = first_packet->ntp_time_ms_;
_frameType = first_packet->frameType;
@ -69,7 +68,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
_encodedHeight = first_packet->height;
// EncodedFrame members
timestamp = first_packet->timestamp;
SetTimestamp(first_packet->timestamp);
VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
RTC_CHECK(last_packet);
@ -140,10 +139,6 @@ bool RtpFrameObject::GetBitstream(uint8_t* destination) const {
return packet_buffer_->GetBitstream(*this, destination);
}
uint32_t RtpFrameObject::Timestamp() const {
return timestamp_;
}
int64_t RtpFrameObject::ReceivedTime() const {
return received_time_;
}

View File

@ -37,7 +37,6 @@ class RtpFrameObject : public EncodedFrame {
enum FrameType frame_type() const;
VideoCodecType codec_type() const;
bool GetBitstream(uint8_t* destination) const override;
uint32_t Timestamp() const override;
int64_t ReceivedTime() const override;
int64_t RenderTime() const override;
bool delayed_by_retransmission() const override;
@ -49,7 +48,6 @@ class RtpFrameObject : public EncodedFrame {
VideoCodecType codec_type_;
uint16_t first_seq_num_;
uint16_t last_seq_num_;
uint32_t timestamp_;
int64_t received_time_;
// Equal to times nacked of the packet with the highest times nacked

View File

@ -211,7 +211,7 @@ int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
frame.EncodedImage()._timeStamp);
frame.Timestamp());
_frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
_frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
_frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
@ -225,7 +225,7 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
} else {
_frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
}
_callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
_callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);
_nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
@ -234,13 +234,13 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
_callback->OnDecoderImplementationName(decoder_->ImplementationName());
if (ret < WEBRTC_VIDEO_CODEC_OK) {
RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
<< frame.TimeStamp() << ", error code: " << ret;
_callback->Pop(frame.TimeStamp());
<< frame.Timestamp() << ", error code: " << ret;
_callback->Pop(frame.Timestamp());
return ret;
} else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
// No output
_callback->Pop(frame.TimeStamp());
_callback->Pop(frame.Timestamp());
}
return ret;
}

View File

@ -265,13 +265,14 @@ absl::optional<int64_t> VCMEncodedFrameCallback::ExtractEncodeStartTime(
// Because some hardware encoders don't preserve capture timestamp we
// use RTP timestamps here.
while (!encode_start_list->empty() &&
IsNewerTimestamp(encoded_image->_timeStamp,
IsNewerTimestamp(encoded_image->Timestamp(),
encode_start_list->front().rtp_timestamp)) {
post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
encode_start_list->pop_front();
}
if (encode_start_list->size() > 0 &&
encode_start_list->front().rtp_timestamp == encoded_image->_timeStamp) {
encode_start_list->front().rtp_timestamp ==
encoded_image->Timestamp()) {
result.emplace(encode_start_list->front().encode_start_time_ms);
if (encoded_image->capture_time_ms_ !=
encode_start_list->front().capture_time_ms) {
@ -365,8 +366,8 @@ void VCMEncodedFrameCallback::FillTimingInfo(size_t simulcast_svc_idx,
int64_t clock_offset_ms = now_ms - encoded_image->timing_.encode_finish_ms;
// Translate capture timestamp to local WebRTC clock.
encoded_image->capture_time_ms_ += clock_offset_ms;
encoded_image->_timeStamp =
static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90);
encoded_image->SetTimestamp(
static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90));
encode_start_ms.emplace(encoded_image->timing_.encode_start_ms +
clock_offset_ms);
}
@ -389,7 +390,7 @@ EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image._timeStamp);
"timestamp", encoded_image.Timestamp());
size_t simulcast_svc_idx = 0;
if (codec_specific->codecType == kVideoCodecVP9) {
if (codec_specific->codecSpecific.VP9.num_spatial_layers > 1)

View File

@ -94,7 +94,7 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
CodecSpecificInfo codec_specific;
image._length = FrameSize(min_frame_size, max_frame_size, s, i);
image.capture_time_ms_ = current_timestamp;
image._timeStamp = static_cast<uint32_t>(current_timestamp * 90);
image.SetTimestamp(static_cast<uint32_t>(current_timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
codec_specific.codecSpecific.generic.simulcast_idx = s;
callback.OnEncodeStarted(static_cast<uint32_t>(current_timestamp * 90),
@ -187,7 +187,7 @@ TEST(TestVCMEncodedFrameCallback, NoTimingFrameIfNoEncodeStartTime) {
int64_t timestamp = 1;
image._length = 500;
image.capture_time_ms_ = timestamp;
image._timeStamp = static_cast<uint32_t>(timestamp * 90);
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
codec_specific.codecSpecific.generic.simulcast_idx = 0;
FakeEncodedImageCallback sink;
@ -204,7 +204,7 @@ TEST(TestVCMEncodedFrameCallback, NoTimingFrameIfNoEncodeStartTime) {
// New frame, now skip OnEncodeStarted. Should not result in timing frame.
image.capture_time_ms_ = ++timestamp;
image._timeStamp = static_cast<uint32_t>(timestamp * 90);
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_FALSE(sink.WasTimingFrame());
}
@ -219,7 +219,7 @@ TEST(TestVCMEncodedFrameCallback, AdjustsCaptureTimeForInternalSourceEncoder) {
int64_t timestamp = 1;
image._length = 500;
image.capture_time_ms_ = timestamp;
image._timeStamp = static_cast<uint32_t>(timestamp * 90);
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
codec_specific.codecSpecific.generic.simulcast_idx = 0;
FakeEncodedImageCallback sink;
@ -237,7 +237,7 @@ TEST(TestVCMEncodedFrameCallback, AdjustsCaptureTimeForInternalSourceEncoder) {
// New frame, but this time with encode timestamps set in timing_.
// This should be a timing frame.
image.capture_time_ms_ = ++timestamp;
image._timeStamp = static_cast<uint32_t>(timestamp * 90);
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
image.timing_.encode_start_ms = timestamp + kEncodeStartDelayMs;
image.timing_.encode_finish_ms = timestamp + kEncodeFinishDelayMs;
callback.OnEncodedImage(image, &codec_specific, nullptr);
@ -263,27 +263,27 @@ TEST(TestVCMEncodedFrameCallback, NotifiesAboutDroppedFrames) {
// Any non-zero bitrate needed to be set before the first frame.
callback.OnTargetBitrateChanged(500, 0);
image.capture_time_ms_ = kTimestampMs1;
image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
EXPECT_EQ(0u, sink.GetNumFramesDropped());
callback.OnEncodedImage(image, &codec_specific, nullptr);
image.capture_time_ms_ = kTimestampMs2;
image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
// No OnEncodedImage call for timestamp2. Yet, at this moment it's not known
// that frame with timestamp2 was dropped.
EXPECT_EQ(0u, sink.GetNumFramesDropped());
image.capture_time_ms_ = kTimestampMs3;
image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
image.capture_time_ms_ = kTimestampMs4;
image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
}
@ -299,8 +299,8 @@ TEST(TestVCMEncodedFrameCallback, RestoresCaptureTimestamps) {
// Any non-zero bitrate needed to be set before the first frame.
callback.OnTargetBitrateChanged(500, 0);
image.capture_time_ms_ = kTimestampMs;  // Incorrect timestamp.
image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
image.capture_time_ms_ = 0;  // Incorrect timestamp.
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(kTimestampMs, sink.GetLastCaptureTimestamp());

View File

@ -54,7 +54,7 @@ bool HasNonEmptyState(FrameListPair pair) {
}
void FrameList::InsertFrame(VCMFrameBuffer* frame) {
insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
insert(rbegin().base(), FrameListPair(frame->Timestamp(), frame));
}
VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
@ -110,7 +110,7 @@ void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
}
free_frames->push_back(oldest_frame);
TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
oldest_frame->TimeStamp());
oldest_frame->Timestamp());
erase(begin());
}
}
@ -212,7 +212,7 @@ void Vp9SsMap::UpdateFrames(FrameList* frames) {
continue;
}
SsMap::iterator ss_it;
if (Find(frame_it.second->TimeStamp(), &ss_it)) {
if (Find(frame_it.second->Timestamp(), &ss_it)) {
if (gof_idx >= ss_it->second.num_frames_in_gof) {
continue; // Assume corresponding SS not yet received.
}
@ -528,7 +528,7 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
}
}
*timestamp = oldest_frame->TimeStamp();
*timestamp = oldest_frame->Timestamp();
return true;
}
@ -564,7 +564,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
// Wait for this one to get complete.
waiting_for_completion_.frame_size = frame->Length();
waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
waiting_for_completion_.timestamp = frame->TimeStamp();
waiting_for_completion_.timestamp = frame->Timestamp();
}
}
@ -715,8 +715,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
if (previous_state != kStateComplete) {
TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
frame->TimeStamp());
TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->Timestamp(), "timestamp",
frame->Timestamp());
}
if (buffer_state > 0) {
@ -831,7 +831,7 @@ bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
for (FrameList::const_iterator it = decodable_frames_.begin();
it != decodable_frames_.end(); ++it) {
VCMFrameBuffer* decodable_frame = it->second;
if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
if (IsNewerTimestamp(decodable_frame->Timestamp(), frame.Timestamp())) {
break;
}
decoding_state.SetState(decodable_frame);
@ -865,7 +865,7 @@ void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
it != incomplete_frames_.end();) {
VCMFrameBuffer* frame = it->second;
if (IsNewerTimestamp(original_decoded_state.time_stamp(),
frame->TimeStamp())) {
frame->Timestamp())) {
++it;
continue;
}
@ -947,11 +947,11 @@ int VCMJitterBuffer::NonContinuousOrIncompleteDuration() {
if (incomplete_frames_.empty()) {
return 0;
}
uint32_t start_timestamp = incomplete_frames_.Front()->TimeStamp();
uint32_t start_timestamp = incomplete_frames_.Front()->Timestamp();
if (!decodable_frames_.empty()) {
start_timestamp = decodable_frames_.Back()->TimeStamp();
start_timestamp = decodable_frames_.Back()->Timestamp();
}
return incomplete_frames_.Back()->TimeStamp() - start_timestamp;
return incomplete_frames_.Back()->Timestamp() - start_timestamp;
}
uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
@ -1184,10 +1184,10 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
incoming_frame_count_++;
if (frame.FrameType() == kVideoFrameKey) {
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
"KeyComplete");
} else {
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
"DeltaComplete");
}
@ -1263,7 +1263,7 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
}
// No retransmitted frames should be a part of the jitter
// estimate.
UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(),
UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.Timestamp(),
frame.Length(), incomplete_frame);
}

View File

@ -259,7 +259,7 @@ class TestBasicJitterBuffer : public ::testing::TestWithParam<std::string>,
VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
if (!found_frame)
return nullptr;
return jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
return jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
}
VCMEncodedFrame* DecodeIncompleteFrame() {
@ -414,7 +414,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
return false;
VCMEncodedFrame* frame =
jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
bool ret = (frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
return ret;
@ -964,12 +964,12 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(1000U, frame_out->TimeStamp());
EXPECT_EQ(1000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
EXPECT_EQ(13000U, frame_out->TimeStamp());
EXPECT_EQ(13000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
}
@ -1029,7 +1029,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(3000U, frame_out->TimeStamp());
EXPECT_EQ(3000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_FALSE(
@ -1037,14 +1037,14 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
EXPECT_EQ(6000U, frame_out->TimeStamp());
EXPECT_EQ(6000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
EXPECT_EQ(9000U, frame_out->TimeStamp());
EXPECT_EQ(9000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@ -1123,7 +1123,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(3000U, frame_out->TimeStamp());
EXPECT_EQ(3000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_FALSE(
@ -1131,7 +1131,7 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
jitter_buffer_->ReleaseFrame(frame_out);
frame_out = DecodeCompleteFrame();
EXPECT_EQ(6000U, frame_out->TimeStamp());
EXPECT_EQ(6000U, frame_out->Timestamp());
EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@ -1481,8 +1481,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
uint32_t next_timestamp;
VCMEncodedFrame* frame = jitter_buffer_->NextCompleteFrame(0);
EXPECT_NE(frame, nullptr);
EXPECT_EQ(packet_->timestamp, frame->TimeStamp());
frame = jitter_buffer_->ExtractAndSetDecode(frame->TimeStamp());
EXPECT_EQ(packet_->timestamp, frame->Timestamp());
frame = jitter_buffer_->ExtractAndSetDecode(frame->Timestamp());
EXPECT_TRUE(frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
@ -1728,7 +1728,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(3000u, frame_out->TimeStamp());
EXPECT_EQ(3000u, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
@ -1763,7 +1763,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(timestamp_, frame_out->TimeStamp());
EXPECT_EQ(timestamp_, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
@ -1873,13 +1873,13 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
EXPECT_EQ(0xffffff00, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
EXPECT_EQ(2700u, frame_out2->TimeStamp());
EXPECT_EQ(2700u, frame_out2->Timestamp());
CheckOutFrame(frame_out2, size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
jitter_buffer_->ReleaseFrame(frame_out2);
@ -1916,13 +1916,13 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
EXPECT_EQ(0xffffff00, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
EXPECT_EQ(2700u, frame_out2->TimeStamp());
EXPECT_EQ(2700u, frame_out2->Timestamp());
CheckOutFrame(frame_out2, size_, false);
EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
jitter_buffer_->ReleaseFrame(frame_out2);
@ -2017,7 +2017,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(first_key_frame_timestamp, frame_out->TimeStamp());
EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
jitter_buffer_->ReleaseFrame(frame_out);
@ -2043,7 +2043,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
// Timestamp should never be the last TS inserted.
if (testFrame != NULL) {
EXPECT_TRUE(testFrame->TimeStamp() < timestamp_);
EXPECT_TRUE(testFrame->Timestamp() < timestamp_);
jitter_buffer_->ReleaseFrame(testFrame);
}
}

View File

@ -140,7 +140,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
if (found_frame) {
frame_timestamp = found_frame->TimeStamp();
frame_timestamp = found_frame->Timestamp();
min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
} else {
@ -212,7 +212,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
return NULL;
}
frame->SetRenderTime(render_time_ms);
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS",
"render_time", frame->RenderTimeMs());
if (!frame->Complete()) {
// Update stats for incomplete frames.

View File

@ -115,7 +115,7 @@ bool IvfFileWriter::InitFromFirstFrame(const EncodedImage& encoded_image,
height_ = encoded_image._encodedHeight;
RTC_CHECK_GT(width_, 0);
RTC_CHECK_GT(height_, 0);
using_capture_timestamps_ = encoded_image._timeStamp == 0;
using_capture_timestamps_ = encoded_image.Timestamp() == 0;
codec_type_ = codec_type;
@ -151,7 +151,7 @@ bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,
int64_t timestamp = using_capture_timestamps_
? encoded_image.capture_time_ms_
: wrap_handler_.Unwrap(encoded_image._timeStamp);
: wrap_handler_.Unwrap(encoded_image.Timestamp());
if (last_timestamp_ != -1 && timestamp <= last_timestamp_) {
RTC_LOG(LS_WARNING) << "Timestamp no increasing: " << last_timestamp_
<< " -> " << timestamp;

View File

@ -50,7 +50,7 @@ class IvfFileWriterTest : public ::testing::Test {
if (use_capture_tims_ms) {
frame.capture_time_ms_ = i;
} else {
frame._timeStamp = i;
frame.SetTimestamp(i);
}
if (!file_writer_->WriteFrame(frame, codec_type))
return false;

View File

@ -109,7 +109,7 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
codec_specific_info->codecSpecific.VP8.temporalIdx;
}
return Result(Result::OK, encoded_image._timeStamp);
return Result(Result::OK, encoded_image.Timestamp());
}
// This method only makes sense for VP8.
void GetLastEncodedFrameInfo(int* temporal_layer,

View File

@ -100,7 +100,7 @@ class EncodedImageCallbackImpl : public EncodedImageCallback {
assert(codec_specific_info);
frame_data_.push_back(
FrameData(encoded_image._length, *codec_specific_info));
return Result(Result::OK, encoded_image._timeStamp);
return Result(Result::OK, encoded_image.Timestamp());
}
void Reset() {

View File

@ -517,7 +517,7 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
bool success = Java_MediaCodecVideoDecoder_queueInputBuffer(
jni, j_media_codec_video_decoder_, j_input_buffer_index,
static_cast<int>(inputImage._length), presentation_timestamp_us,
static_cast<int64_t>(inputImage._timeStamp), inputImage.ntp_time_ms_);
static_cast<int64_t>(inputImage.Timestamp()), inputImage.ntp_time_ms_);
if (CheckException(jni) || !success) {
ALOGE << "queueInputBuffer error";
return ProcessHWErrorOnCodecThread();

View File

@ -987,7 +987,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
new EncodedImage(payload, payload_size, payload_size));
image->_encodedWidth = width_;
image->_encodedHeight = height_;
image->_timeStamp = output_timestamp_;
image->SetTimestamp(output_timestamp_);
image->capture_time_ms_ = output_render_time_ms_;
image->rotation_ = output_rotation_;
image->content_type_ = (codec_mode_ == VideoCodecMode::kScreensharing)

View File

@ -98,12 +98,12 @@ int32_t VideoDecoderWrapper::Decode(
EncodedImage input_image(image_param);
// We use RTP timestamp for capture time because capture_time_ms_ is always 0.
input_image.capture_time_ms_ =
input_image._timeStamp / kNumRtpTicksPerMillisec;
input_image.Timestamp() / kNumRtpTicksPerMillisec;
FrameExtraInfo frame_extra_info;
frame_extra_info.timestamp_ns =
input_image.capture_time_ms_ * rtc::kNumNanosecsPerMillisec;
frame_extra_info.timestamp_rtp = input_image._timeStamp;
frame_extra_info.timestamp_rtp = input_image.Timestamp();
frame_extra_info.timestamp_ntp = input_image.ntp_time_ms_;
frame_extra_info.qp =
qp_parsing_enabled_ ? ParseQP(input_image) : absl::nullopt;

View File

@ -274,7 +274,7 @@ void VideoEncoderWrapper::OnEncodedFrame(JNIEnv* jni,
task_buffer.size(), task_buffer.size());
frame._encodedWidth = encoded_width;
frame._encodedHeight = encoded_height;
frame._timeStamp = frame_extra_info.timestamp_rtp;
frame.SetTimestamp(frame_extra_info.timestamp_rtp);
frame.capture_time_ms_ = capture_time_ns / rtc::kNumNanosecsPerMillisec;
frame._frameType = (FrameType)frame_type;
frame.rotation_ = (VideoRotation)rotation;

View File

@ -39,7 +39,7 @@
freeWhenDone:NO];
_encodedWidth = rtc::dchecked_cast<int32_t>(encodedImage._encodedWidth);
_encodedHeight = rtc::dchecked_cast<int32_t>(encodedImage._encodedHeight);
_timeStamp = encodedImage._timeStamp;
_timeStamp = encodedImage.Timestamp();
_captureTimeMs = encodedImage.capture_time_ms_;
_ntpTimeMs = encodedImage.ntp_time_ms_;
_flags = encodedImage.timing_.flags;
@ -63,7 +63,7 @@
(uint8_t *)_buffer.bytes, (size_t)_buffer.length, (size_t)_buffer.length);
encodedImage._encodedWidth = rtc::dchecked_cast<uint32_t>(_encodedWidth);
encodedImage._encodedHeight = rtc::dchecked_cast<uint32_t>(_encodedHeight);
encodedImage._timeStamp = _timeStamp;
encodedImage.SetTimestamp(_timeStamp);
encodedImage.capture_time_ms_ = _captureTimeMs;
encodedImage.ntp_time_ms_ = _ntpTimeMs;
encodedImage.timing_.flags = _flags;

View File

@ -48,7 +48,7 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
encodedImage._encodedHeight = inputImage.height();
encodedImage._encodedWidth = inputImage.width();
encodedImage._frameType = kVideoFrameKey;
encodedImage._timeStamp = inputImage.timestamp();
encodedImage.SetTimestamp(inputImage.timestamp());
encodedImage.capture_time_ms_ = inputImage.render_time_ms();
RTPFragmentationHeader* fragmentation = NULL;
CodecSpecificInfo specific;

View File

@ -40,7 +40,7 @@ int32_t FakeDecoder::Decode(const EncodedImage& input,
VideoFrame frame(I420Buffer::Create(width_, height_),
webrtc::kVideoRotation_0,
render_time_ms * rtc::kNumMicrosecsPerMillisec);
frame.set_timestamp(input._timeStamp);
frame.set_timestamp(input.Timestamp());
frame.set_ntp_time_ms(input.ntp_time_ms_);
callback_->Decoded(frame);

View File

@ -144,7 +144,7 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image,
std::unique_ptr<uint8_t[]> encoded_buffer(new uint8_t[num_encoded_bytes]);
memcpy(encoded_buffer.get(), encoded_buffer_, num_encoded_bytes);
EncodedImage encoded(encoded_buffer.get(), stream_bytes, num_encoded_bytes);
encoded._timeStamp = input_image.timestamp();
encoded.SetTimestamp(input_image.timestamp());
encoded.capture_time_ms_ = input_image.render_time_ms();
encoded._frameType = (*frame_types)[i];
encoded._encodedWidth = simulcast_streams[i].width;

View File

@ -57,7 +57,6 @@ class FuzzyFrameObject : public video_coding::EncodedFrame {
~FuzzyFrameObject() {}
bool GetBitstream(uint8_t* destination) const override { return false; }
uint32_t Timestamp() const override { return timestamp; }
int64_t ReceivedTime() const override { return 0; }
int64_t RenderTime() const override { return _renderTimeMs; }
};
@ -76,7 +75,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
std::unique_ptr<FuzzyFrameObject> frame(new FuzzyFrameObject());
frame->id.picture_id = reader.GetNum<int64_t>();
frame->id.spatial_layer = reader.GetNum<uint8_t>();
frame->timestamp = reader.GetNum<uint32_t>();
frame->SetTimestamp(reader.GetNum<uint32_t>());
frame->num_references = reader.GetNum<uint8_t>() %
video_coding::EncodedFrame::kMaxFrameReferences;

View File

@ -250,7 +250,7 @@ bool SendStatisticsProxy::UmaSamplesContainer::InsertEncodedFrame(
// Check for jump in timestamp.
if (!encoded_frames_.empty()) {
uint32_t oldest_timestamp = encoded_frames_.begin()->first;
if (ForwardDiff(oldest_timestamp, encoded_frame._timeStamp) >
if (ForwardDiff(oldest_timestamp, encoded_frame.Timestamp()) >
kMaxEncodedFrameTimestampDiff) {
// Gap detected, clear frames to have a sequence where newest timestamp
// is not too far away from oldest in order to distinguish old and new.
@ -262,7 +262,7 @@ bool SendStatisticsProxy::UmaSamplesContainer::InsertEncodedFrame(
if (it == encoded_frames_.end()) {
// First frame with this timestamp.
encoded_frames_.insert(
std::make_pair(encoded_frame._timeStamp,
std::make_pair(encoded_frame.Timestamp(),
Frame(now_ms, encoded_frame._encodedWidth,
encoded_frame._encodedHeight, simulcast_idx)));
sent_fps_counter_.Add(1);

View File

@ -1036,7 +1036,7 @@ TEST_F(SendStatisticsProxyTest, SentResolutionHistogramsAreUpdated) {
// Not enough samples, stats should not be updated.
for (int i = 0; i < kMinSamples - 1; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += 90 * 1000 / kFps;
encoded_image.SetTimestamp(encoded_image.Timestamp() + 90 * 1000 / kFps);
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
}
SetUp(); // Reset stats proxy also causes histograms to be reported.
@ -1044,10 +1044,10 @@ TEST_F(SendStatisticsProxyTest, SentResolutionHistogramsAreUpdated) {
EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
// Enough samples, max resolution per frame should be reported.
encoded_image._timeStamp = 0xffff0000; // Will wrap.
encoded_image.SetTimestamp(0xffff0000); // Will wrap.
for (int i = 0; i < kMinSamples; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += 90 * 1000 / kFps;
encoded_image.SetTimestamp(encoded_image.Timestamp() + 90 * 1000 / kFps);
encoded_image._encodedWidth = kWidth;
encoded_image._encodedHeight = kHeight;
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@ -1083,7 +1083,7 @@ TEST_F(SendStatisticsProxyTest, SentFpsHistogramIsUpdated) {
int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000 + 1;
for (int i = 0; i < frames; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
++encoded_image._timeStamp;
encoded_image.SetTimestamp(encoded_image.Timestamp() + 1);
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
// Frame with same timestamp should not be counted.
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@ -1124,7 +1124,7 @@ TEST_F(SendStatisticsProxyTest, SentFpsHistogramExcludesSuspendedTime) {
int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
for (int i = 0; i < frames; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp = i + 1;
encoded_image.SetTimestamp(i + 1);
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
}
// Suspend.
@ -1133,7 +1133,7 @@ TEST_F(SendStatisticsProxyTest, SentFpsHistogramExcludesSuspendedTime) {
for (int i = 0; i < frames; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp = i + 1;
encoded_image.SetTimestamp(i + 1);
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
}
// Suspended time interval should not affect the framerate.
@ -1431,7 +1431,8 @@ TEST_F(SendStatisticsProxyTest,
encoded_image._encodedHeight = kHeight;
for (int i = 0; i < kMinSamples; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += (kRtpClockRateHz / kFps);
encoded_image.SetTimestamp(encoded_image.Timestamp() +
(kRtpClockRateHz / kFps));
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
}
@ -1466,7 +1467,8 @@ TEST_F(SendStatisticsProxyTest,
EncodedImage encoded_image;
for (int i = 0; i < kMinSamples; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += (kRtpClockRateHz / kFps);
encoded_image.SetTimestamp(encoded_image.Timestamp() +
(kRtpClockRateHz / kFps));
encoded_image._encodedWidth = kWidth;
encoded_image._encodedHeight = kHeight;
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@ -1511,7 +1513,8 @@ TEST_F(SendStatisticsProxyTest,
encoded_image._encodedHeight = kHeight / 2;
for (int i = 0; i < kMinSamples; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += (kRtpClockRateHz / kFps);
encoded_image.SetTimestamp(encoded_image.Timestamp() +
(kRtpClockRateHz / kFps));
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
}
@ -1623,21 +1626,23 @@ TEST_F(SendStatisticsProxyTest, GetStatsReportsBandwidthLimitedResolution) {
encoded_image._encodedHeight = kHeight / 2;
for (int i = 0; i < kMinSamples; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += (kRtpClockRateHz / kFps);
encoded_image.SetTimestamp(encoded_image.Timestamp() +
(kRtpClockRateHz / kFps));
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
}
// First frame removed from EncodedFrameMap, stats updated.
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
++encoded_image._timeStamp;
encoded_image.SetTimestamp(encoded_image.Timestamp() + 1);
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
// Two streams encoded.
for (int i = 0; i < kMinSamples; ++i) {
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += (kRtpClockRateHz / kFps);
encoded_image.SetTimestamp(encoded_image.Timestamp() +
(kRtpClockRateHz / kFps));
encoded_image._encodedWidth = kWidth;
encoded_image._encodedHeight = kHeight;
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@ -1650,7 +1655,8 @@ TEST_F(SendStatisticsProxyTest, GetStatsReportsBandwidthLimitedResolution) {
// First frame with two streams removed, expect no resolution limit.
fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
encoded_image._timeStamp += (kRtpClockRateHz / kFps);
encoded_image.SetTimestamp(encoded_image.Timestamp() +
(kRtpClockRateHz / kFps));
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);

View File

@ -332,7 +332,7 @@ EncodedImageCallback::Result VideoReceiveStream::OnEncodedImage(
}
}
return Result(Result::OK, encoded_image._timeStamp);
return Result(Result::OK, encoded_image.Timestamp());
}
void VideoReceiveStream::SendNack(

View File

@ -529,7 +529,7 @@ EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
if (config_->post_encode_callback) {
config_->post_encode_callback->EncodedFrameCallback(EncodedFrame(
encoded_image._buffer, encoded_image._length, encoded_image._frameType,
simulcast_idx, encoded_image._timeStamp));
simulcast_idx, encoded_image.Timestamp()));
}
{
rtc::CritScope lock(&encoder_activity_crit_sect_);

View File

@ -3037,7 +3037,7 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
uint8_t buffer[16] = {0};
EncodedImage encoded(buffer, sizeof(buffer), sizeof(buffer));
encoded._timeStamp = input_image.timestamp();
encoded.SetTimestamp(input_image.timestamp());
encoded.capture_time_ms_ = input_image.render_time_ms();
for (size_t i = 0; i < kNumStreams; ++i) {

View File

@ -186,7 +186,7 @@ VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeNextFrame(
}
int64_t decode_start_time_ms = rtc::TimeMillis();
int64_t timestamp = frame->timestamp;
int64_t timestamp = frame->Timestamp();
int64_t render_time_us = frame->RenderTimeMs() * 1000;
bookkeeping_queue_.PostTask(
[this, decode_start_time_ms, timestamp, render_time_us]() {

View File

@ -887,7 +887,7 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
sink_->OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
int64_t time_sent_us = rtc::TimeMicros();
uint32_t timestamp = encoded_image._timeStamp;
uint32_t timestamp = encoded_image.Timestamp();
const int qp = encoded_image.qp_;
int64_t capture_time_us =
encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec;

View File

@ -656,7 +656,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
const RTPFragmentationHeader* fragmentation) override {
rtc::CritScope lock(&crit_);
EXPECT_TRUE(expect_frames_);
last_timestamp_ = encoded_image._timeStamp;
last_timestamp_ = encoded_image.Timestamp();
last_width_ = encoded_image._encodedWidth;
last_height_ = encoded_image._encodedHeight;
encoded_frame_event_.Set();