Implement timing frames.

Timing information is gathered in EncodedImage, starting at the encoders. It is then sent using an RTP header extension, and finally gathered at the GenericDecoder. Actual reporting and tests will be in the next CLs.

BUG=webrtc:7594
Review-Url: https://codereview.webrtc.org/2911193002
Cr-Commit-Position: refs/heads/master@{#18659}

parent 3b921f0856
commit 04f4d126f8
@@ -173,6 +173,7 @@ rtc_source_set("video_frame_api") {
    "video/video_frame_buffer.cc",
    "video/video_frame_buffer.h",
    "video/video_rotation.h",
    "video/video_timing.h",
  ]

  deps = [

webrtc/api/video/video_timing.h (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_API_VIDEO_VIDEO_TIMING_H_
#define WEBRTC_API_VIDEO_VIDEO_TIMING_H_

#include <stdint.h>
#include <limits>

#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"

namespace webrtc {

// Video timing timestamps in ms counted from capture_time_ms of a frame.
struct VideoTiming {
  static const uint8_t kEncodeStartDeltaIdx = 0;
  static const uint8_t kEncodeFinishDeltaIdx = 1;
  static const uint8_t kPacketizationFinishDeltaIdx = 2;
  static const uint8_t kPacerExitDeltaIdx = 3;
  static const uint8_t kNetworkTimestampDeltaIdx = 4;
  static const uint8_t kNetwork2TimestampDeltaIdx = 5;

  // Returns |time_ms - base_ms|, capped at the max 16-bit value. Used to fill
  // this data structure, since the
  // https://webrtc.org/experiments/rtp-hdrext/video-timing/ extension stores
  // 16-bit deltas of timestamps from the packet capture time.
  static uint16_t GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
    RTC_DCHECK_GE(time_ms, base_ms);
    return rtc::saturated_cast<uint16_t>(time_ms - base_ms);
  }

  uint16_t encode_start_delta_ms;
  uint16_t encode_finish_delta_ms;
  uint16_t packetization_finish_delta_ms;
  uint16_t pacer_exit_delta_ms;
  uint16_t network_timstamp_delta_ms;
  uint16_t network2_timstamp_delta_ms;
  bool is_timing_frame;
};

}  // namespace webrtc

#endif  // WEBRTC_API_VIDEO_VIDEO_TIMING_H_
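As a quick illustration of how the capped deltas behave, here is a minimal standalone sketch (hypothetical values; the saturating cast is reimplemented locally instead of using rtc::saturated_cast, so it compiles without WebRTC headers). In-range deltas pass through unchanged, while a delta that overflows 16 bits clamps to 65535:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Local stand-in for VideoTiming::GetDeltaCappedMs(): clamps the delta to
// the 16-bit range that the video-timing header extension can carry.
uint16_t GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
  assert(time_ms >= base_ms);
  return static_cast<uint16_t>(
      std::min<int64_t>(time_ms - base_ms, UINT16_MAX));
}

int main() {
  const int64_t capture_ms = 1000;
  assert(GetDeltaCappedMs(capture_ms, 1250) == 250);      // normal case
  assert(GetDeltaCappedMs(capture_ms, 200000) == 65535);  // capped at 2^16 - 1
}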
@@ -54,7 +54,8 @@ RTPHeaderExtension::RTPHeaderExtension()
      hasVideoRotation(false),
      videoRotation(kVideoRotation_0),
      hasVideoContentType(false),
-      videoContentType(VideoContentType::UNSPECIFIED) {}
+      videoContentType(VideoContentType::UNSPECIFIED),
+      has_video_timing(false) {}

RTPHeader::RTPHeader()
    : markerBit(false),
@@ -86,6 +87,7 @@ VideoCodec::VideoCodec()
      spatialLayers(),
      mode(kRealtimeVideo),
      expect_encode_from_texture(false),
      timing_frame_thresholds({0, 0}),
      codec_specific_() {}

VideoCodecVP8* VideoCodec::VP8() {
@@ -20,6 +20,7 @@
#include "webrtc/api/video/video_content_type.h"
#include "webrtc/api/video/video_rotation.h"
#include "webrtc/api/video/video_timing.h"
#include "webrtc/base/array_view.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/optional.h"
@@ -588,6 +589,19 @@ class VideoCodec {
  VideoCodecMode mode;
  bool expect_encode_from_texture;

  // Timing frames configuration. There is a delay of delay_ms between two
  // consecutive timing frames, excluding outliers. A frame is always made a
  // timing frame if its size is at least outlier_ratio percent of the "ideal"
  // average frame size given the bitrate and framerate, i.e. if it is bigger
  // than |outlier_ratio / 100.0 * bitrate_bps / fps| bits. This way, timing
  // frames are usually not sent too often, yet large frames always carry
  // timing information for debugging purposes, because they are the most
  // likely to cause extra delays.
  struct TimingFrameTriggerThresholds {
    int64_t delay_ms;
    uint16_t outlier_ratio_percent;
  } timing_frame_thresholds;

  bool operator==(const VideoCodec& other) const = delete;
  bool operator!=(const VideoCodec& other) const = delete;
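To make the outlier rule concrete with hypothetical numbers: at a 900 kbps target and 30 fps with outlier_ratio_percent = 250, the "ideal" average frame is 900000 / 30 = 30000 bits, so any frame larger than 2.5 * 30000 = 75000 bits (9375 bytes) is marked as a timing frame regardless of the delay_ms schedule.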
@@ -763,6 +777,9 @@ struct RTPHeaderExtension {
  bool hasVideoContentType;
  VideoContentType videoContentType;

  bool has_video_timing;
  VideoTiming video_timing;

  PlayoutDelay playout_delay = {-1, -1};

  // For identification of a stream when ssrc is not signaled. See
@@ -35,6 +35,12 @@ class EncodedImage {
  EncodedImage(uint8_t* buffer, size_t length, size_t size)
      : _buffer(buffer), _length(length), _size(size) {}

  void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms) const {
    timing_.is_timing_frame = true;
    timing_.encode_start_ms = encode_start_ms;
    timing_.encode_finish_ms = encode_finish_ms;
  }

  // TODO(kthelgason): get rid of this struct as it only has a single member
  // remaining.
  struct AdaptReason {

@@ -63,6 +69,19 @@ class EncodedImage {
  // indication that all future frames will be constrained with those limits
  // until the application indicates a change again.
  PlayoutDelay playout_delay_ = {-1, -1};

  // Timing information should be updatable on const instances.
  mutable struct Timing {
    bool is_timing_frame = false;
    int64_t encode_start_ms = 0;
    int64_t encode_finish_ms = 0;
    int64_t packetization_finish_ms = 0;
    int64_t pacer_exit_ms = 0;
    int64_t network_timestamp_ms = 0;
    int64_t network2_timestamp_ms = 0;
    int64_t receive_start_ms = 0;
    int64_t receive_finish_ms = 0;
  } timing_;
};

}  // namespace webrtc
@@ -76,6 +76,10 @@ const char* RtpExtension::kVideoContentTypeUri =
    "http://www.webrtc.org/experiments/rtp-hdrext/video-content-type";
const int RtpExtension::kVideoContentTypeDefaultId = 7;

const char* RtpExtension::kVideoTimingUri =
    "http://www.webrtc.org/experiments/rtp-hdrext/video-timing";
const int RtpExtension::kVideoTimingDefaultId = 8;

const int RtpExtension::kMinId = 1;
const int RtpExtension::kMaxId = 14;

@@ -90,7 +94,8 @@ bool RtpExtension::IsSupportedForVideo(const std::string& uri) {
         uri == webrtc::RtpExtension::kVideoRotationUri ||
         uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
         uri == webrtc::RtpExtension::kPlayoutDelayUri ||
-         uri == webrtc::RtpExtension::kVideoContentTypeUri;
+         uri == webrtc::RtpExtension::kVideoContentTypeUri ||
+         uri == webrtc::RtpExtension::kVideoTimingUri;
}

VideoStream::VideoStream()
@@ -92,6 +92,10 @@ struct RtpExtension {
  static const char* kVideoContentTypeUri;
  static const int kVideoContentTypeDefaultId;

  // Header extension for video timing.
  static const char* kVideoTimingUri;
  static const int kVideoTimingDefaultId;

  // Header extension for transport sequence number, see url for details:
  // http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions
  static const char* kTransportSequenceNumberUri;
@@ -458,6 +458,8 @@ RtpCapabilities WebRtcVideoEngine::GetCapabilities() const {
        webrtc::RtpExtension(webrtc::RtpExtension::kVideoContentTypeUri,
                             webrtc::RtpExtension::kVideoContentTypeDefaultId));
  }
  // TODO(ilnik): Add kVideoTimingUri/kVideoTimingDefaultId to capabilities.
  // Possibly inside field trial.
  return capabilities;
}
@@ -61,6 +61,8 @@ struct RTPVideoHeader {
  VideoContentType content_type;

  VideoTiming video_timing;

  bool is_first_packet_in_frame;
  uint8_t simulcastIdx;  // Index of the simulcast encoder creating
                         // this frame, 0 if not using simulcast.
@@ -77,6 +77,7 @@ enum RTPExtensionType {
  kRtpExtensionTransportSequenceNumber,
  kRtpExtensionPlayoutDelay,
  kRtpExtensionVideoContentType,
  kRtpExtensionVideoTiming,
  kRtpExtensionRtpStreamId,
  kRtpExtensionRepairedRtpStreamId,
  kRtpExtensionNumberOfExtensions  // Must be the last entity in the enum.
@@ -39,6 +39,7 @@ constexpr ExtensionInfo kExtensions[] = {
    CreateExtensionInfo<TransportSequenceNumber>(),
    CreateExtensionInfo<PlayoutDelayLimits>(),
    CreateExtensionInfo<VideoContentTypeExtension>(),
    CreateExtensionInfo<VideoTimingExtension>(),
    CreateExtensionInfo<RtpStreamId>(),
    CreateExtensionInfo<RepairedRtpStreamId>(),
};
@@ -245,6 +245,72 @@ bool VideoContentTypeExtension::Write(uint8_t* data,
  return true;
}

// Video Timing.
// 6 timestamps in milliseconds counted from the capture time stored in the
// rtp header: encode start/finish, packetization complete, pacer exit, and
// two slots reserved for modification by the network.
//
//    0                   1                   2                   3
//    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2
//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//   |  ID   | len=11|     encode start ms delta     | encode finish |
//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//   |   ms delta    |  packetizer finish ms delta   |  pacer exit   |
//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//   |   ms delta    |  network timestamp ms delta   | network2 time-|
//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//   | stamp ms delta|
//   +-+-+-+-+-+-+-+-+

constexpr RTPExtensionType VideoTimingExtension::kId;
constexpr uint8_t VideoTimingExtension::kValueSizeBytes;
constexpr const char* VideoTimingExtension::kUri;

bool VideoTimingExtension::Parse(rtc::ArrayView<const uint8_t> data,
                                 VideoTiming* timing) {
  RTC_DCHECK(timing);
  if (data.size() != kValueSizeBytes)
    return false;
  timing->encode_start_delta_ms =
      ByteReader<uint16_t>::ReadBigEndian(data.data());
  timing->encode_finish_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
      data.data() + 2 * VideoTiming::kEncodeFinishDeltaIdx);
  timing->packetization_finish_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
      data.data() + 2 * VideoTiming::kPacketizationFinishDeltaIdx);
  timing->pacer_exit_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
      data.data() + 2 * VideoTiming::kPacerExitDeltaIdx);
  timing->network_timstamp_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
      data.data() + 2 * VideoTiming::kNetworkTimestampDeltaIdx);
  timing->network2_timstamp_delta_ms = ByteReader<uint16_t>::ReadBigEndian(
      data.data() + 2 * VideoTiming::kNetwork2TimestampDeltaIdx);
  timing->is_timing_frame = true;
  return true;
}

bool VideoTimingExtension::Write(uint8_t* data, const VideoTiming& timing) {
  ByteWriter<uint16_t>::WriteBigEndian(data, timing.encode_start_delta_ms);
  ByteWriter<uint16_t>::WriteBigEndian(
      data + 2 * VideoTiming::kEncodeFinishDeltaIdx,
      timing.encode_finish_delta_ms);
  ByteWriter<uint16_t>::WriteBigEndian(
      data + 2 * VideoTiming::kPacketizationFinishDeltaIdx,
      timing.packetization_finish_delta_ms);
  ByteWriter<uint16_t>::WriteBigEndian(
      data + 2 * VideoTiming::kPacerExitDeltaIdx, timing.pacer_exit_delta_ms);
  ByteWriter<uint16_t>::WriteBigEndian(
      data + 2 * VideoTiming::kNetworkTimestampDeltaIdx, 0);  // reserved
  ByteWriter<uint16_t>::WriteBigEndian(
      data + 2 * VideoTiming::kNetwork2TimestampDeltaIdx, 0);  // reserved
  return true;
}

bool VideoTimingExtension::Write(uint8_t* data,
                                 uint16_t time_delta_ms,
                                 uint8_t idx) {
  RTC_DCHECK_LT(idx, 6);
  ByteWriter<uint16_t>::WriteBigEndian(data + 2 * idx, time_delta_ms);
  return true;
}

// RtpStreamId.
constexpr RTPExtensionType RtpStreamId::kId;
constexpr const char* RtpStreamId::kUri;
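A note on the diagram: len=11 because the one-byte RFC 5285 extension header stores the payload length minus one, which is why the parser in rtp_utility.cc below checks len != kValueSizeBytes - 1. As a quick sanity check of the byte offsets, here is a minimal standalone sketch (hypothetical delta values; big-endian helpers reimplemented locally rather than via WebRTC's ByteReader/ByteWriter):

#include <cassert>
#include <cstdint>

// Local stand-ins for ByteWriter<uint16_t>/ByteReader<uint16_t>.
void WriteBE16(uint8_t* p, uint16_t v) {
  p[0] = v >> 8;
  p[1] = v & 0xff;
}
uint16_t ReadBE16(const uint8_t* p) {
  return static_cast<uint16_t>(p[0] << 8 | p[1]);
}

int main() {
  // 12-byte payload: six big-endian 16-bit deltas at byte offsets 2 * idx.
  uint8_t payload[12] = {};
  WriteBE16(payload + 2 * 0, 5);   // encode start delta
  WriteBE16(payload + 2 * 1, 20);  // encode finish delta
  WriteBE16(payload + 2 * 2, 23);  // packetization finish delta
  WriteBE16(payload + 2 * 3, 40);  // pacer exit delta
  assert(ReadBE16(payload + 2 * 3) == 40);
  assert(ReadBE16(payload + 2 * 4) == 0);  // network delta: reserved, zero
}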
@@ -15,6 +15,7 @@
#include "webrtc/api/video/video_content_type.h"
#include "webrtc/api/video/video_rotation.h"
#include "webrtc/api/video/video_timing.h"
#include "webrtc/base/array_view.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"

@@ -126,6 +127,24 @@ class VideoContentTypeExtension {
  static bool Write(uint8_t* data, VideoContentType content_type);
};

class VideoTimingExtension {
 public:
  static constexpr RTPExtensionType kId = kRtpExtensionVideoTiming;
  static constexpr uint8_t kValueSizeBytes = 12;
  static constexpr const char* kUri =
      "http://www.webrtc.org/experiments/rtp-hdrext/video-timing";

  static bool Parse(rtc::ArrayView<const uint8_t> data, VideoTiming* timing);
  static size_t ValueSize(const VideoTiming&) { return kValueSizeBytes; }
  static bool Write(uint8_t* data, const VideoTiming& timing);

  static size_t ValueSize(uint16_t time_delta_ms, uint8_t idx) {
    return kValueSizeBytes;
  }
  // Writes only a single time delta at position idx.
  static bool Write(uint8_t* data, uint16_t time_delta_ms, uint8_t idx);
};

class RtpStreamId {
 public:
  static constexpr RTPExtensionType kId = kRtpExtensionRtpStreamId;
@@ -174,6 +174,8 @@ void Packet::GetHeader(RTPHeader* header) const {
  header->extension.hasVideoContentType =
      GetExtension<VideoContentTypeExtension>(
          &header->extension.videoContentType);
  header->extension.has_video_timing =
      GetExtension<VideoTimingExtension>(&header->extension.video_timing);
  GetExtension<RtpStreamId>(&header->extension.stream_id);
  GetExtension<RepairedRtpStreamId>(&header->extension.repaired_stream_id);
  GetExtension<PlayoutDelayLimits>(&header->extension.playout_delay);
@@ -10,6 +10,7 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_PACKET_TO_SEND_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_PACKET_TO_SEND_H_

#include "webrtc/modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_packet.h"

namespace webrtc {

@@ -23,10 +24,36 @@ class RtpPacketToSend : public rtp::Packet {
      : Packet(extensions, capacity) {}

  RtpPacketToSend& operator=(const RtpPacketToSend& packet) = default;

  // Time in the local time base, as close as possible to the frame capture
  // time.
  int64_t capture_time_ms() const { return capture_time_ms_; }

  void set_capture_time_ms(int64_t time) { capture_time_ms_ = time; }

  void set_packetization_finish_time_ms(int64_t time) {
    SetExtension<VideoTimingExtension>(
        VideoTiming::GetDeltaCappedMs(capture_time_ms_, time),
        VideoTiming::kPacketizationFinishDeltaIdx);
  }

  void set_pacer_exit_time_ms(int64_t time) {
    SetExtension<VideoTimingExtension>(
        VideoTiming::GetDeltaCappedMs(capture_time_ms_, time),
        VideoTiming::kPacerExitDeltaIdx);
  }

  void set_network_time_ms(int64_t time) {
    SetExtension<VideoTimingExtension>(
        VideoTiming::GetDeltaCappedMs(capture_time_ms_, time),
        VideoTiming::kNetworkTimestampDeltaIdx);
  }

  void set_network2_time_ms(int64_t time) {
    SetExtension<VideoTimingExtension>(
        VideoTiming::GetDeltaCappedMs(capture_time_ms_, time),
        VideoTiming::kNetwork2TimestampDeltaIdx);
  }

 private:
  int64_t capture_time_ms_ = 0;
};
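These setters are exercised further down in this change: RTPSender calls set_pacer_exit_time_ms(now_ms) in PrepareAndSendPacket() and SendToNetwork() whenever the packet carries a VideoTimingExtension, and RTPSenderVideo stamps set_packetization_finish_time_ms() on the last packet of a timing frame. The network/network2 setters have no callers yet; those two slots are written as zero (reserved) so intermediate network nodes can fill them in later.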
@@ -91,6 +91,7 @@ int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
  rtp_header->type = parsed_payload.type;
  rtp_header->type.Video.rotation = kVideoRotation_0;
  rtp_header->type.Video.content_type = VideoContentType::UNSPECIFIED;
  rtp_header->type.Video.video_timing.is_timing_frame = false;

  // Retrieve the video rotation information.
  if (rtp_header->header.extension.hasVideoRotation) {

@@ -103,6 +104,12 @@ int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
        rtp_header->header.extension.videoContentType;
  }

  if (rtp_header->header.extension.has_video_timing) {
    rtp_header->type.Video.video_timing =
        rtp_header->header.extension.video_timing;
    rtp_header->type.Video.video_timing.is_timing_frame = true;
  }

  rtp_header->type.Video.playout_delay =
      rtp_header->header.extension.playout_delay;
@@ -42,6 +42,8 @@ RTPExtensionType StringToRtpExtensionType(const std::string& extension) {
    return kRtpExtensionPlayoutDelay;
  if (extension == RtpExtension::kVideoContentTypeUri)
    return kRtpExtensionVideoContentType;
  if (extension == RtpExtension::kVideoTimingUri)
    return kRtpExtensionVideoTiming;
  RTC_NOTREACHED() << "Looking up unsupported RTP extension.";
  return kRtpExtensionNone;
}
@@ -214,6 +214,7 @@ class RtpRtcpImplTest : public ::testing::Test {
    rtp_video_header.simulcastIdx = 0;
    rtp_video_header.codec = kRtpVideoVp8;
    rtp_video_header.codecHeader = {vp8_header};
    rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};

    const uint8_t payload[100] = {0};
    EXPECT_EQ(true, module->impl_->SendOutgoingData(
@@ -743,6 +743,9 @@ bool RTPSender::PrepareAndSendPacket(std::unique_ptr<RtpPacketToSend> packet,
  packet_to_send->SetExtension<AbsoluteSendTime>(
      AbsoluteSendTime::MsTo24Bits(now_ms));

  if (packet_to_send->HasExtension<VideoTimingExtension>())
    packet_to_send->set_pacer_exit_time_ms(now_ms);

  PacketOptions options;
  if (UpdateTransportSequenceNumber(packet_to_send, &options.packet_id)) {
    AddPacketToTransportFeedback(options.packet_id, *packet_to_send,

@@ -830,6 +833,8 @@ bool RTPSender::SendToNetwork(std::unique_ptr<RtpPacketToSend> packet,
  if (packet->capture_time_ms() > 0) {
    packet->SetExtension<TransmissionOffset>(
        kTimestampTicksPerMs * (now_ms - packet->capture_time_ms()));
    if (packet->HasExtension<VideoTimingExtension>())
      packet->set_pacer_exit_time_ms(now_ms);
  }
  packet->SetExtension<AbsoluteSendTime>(AbsoluteSendTime::MsTo24Bits(now_ms));
@@ -38,6 +38,7 @@ namespace {
const int kTransmissionTimeOffsetExtensionId = 1;
const int kAbsoluteSendTimeExtensionId = 14;
const int kTransportSequenceNumberExtensionId = 13;
const int kVideoTimingExtensionId = 12;
const int kPayload = 100;
const int kRtxPayload = 98;
const uint32_t kTimestamp = 10;

@@ -74,6 +75,8 @@ class LoopbackTransportTest : public webrtc::Transport {
                                   kVideoRotationExtensionId);
    receivers_extensions_.Register(kRtpExtensionAudioLevel,
                                   kAudioLevelExtensionId);
    receivers_extensions_.Register(kRtpExtensionVideoTiming,
                                   kVideoTimingExtensionId);
  }

  bool SendRtp(const uint8_t* data,

@@ -460,6 +463,51 @@ TEST_P(RtpSenderTest, SendsPacketsWithTransportSequenceNumber) {
  EXPECT_EQ(transport_.last_packet_id_, transport_seq_no);
}

TEST_P(RtpSenderTestWithoutPacer, WritesTimestampToTimingExtension) {
  rtp_sender_->SetStorePacketsStatus(true, 10);
  EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
                   kRtpExtensionVideoTiming, kVideoTimingExtensionId));
  int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
  auto packet = rtp_sender_->AllocatePacket();
  packet->SetPayloadType(kPayload);
  packet->SetMarker(true);
  packet->SetTimestamp(kTimestamp);
  packet->set_capture_time_ms(capture_time_ms);
  const VideoTiming kVideoTiming = {0u, 0u, 0u, 0u, 0u, 0u, true};
  packet->SetExtension<VideoTimingExtension>(kVideoTiming);
  EXPECT_TRUE(rtp_sender_->AssignSequenceNumber(packet.get()));
  size_t packet_size = packet->size();
  webrtc::RTPHeader rtp_header;

  packet->GetHeader(&rtp_header);

  const int kStoredTimeInMs = 100;
  fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs);

  EXPECT_TRUE(rtp_sender_->SendToNetwork(std::move(packet),
                                         kAllowRetransmission,
                                         RtpPacketSender::kNormalPriority));
  EXPECT_EQ(1, transport_.packets_sent());
  EXPECT_EQ(packet_size, transport_.last_sent_packet().size());

  transport_.last_sent_packet().GetHeader(&rtp_header);
  EXPECT_TRUE(rtp_header.extension.has_video_timing);
  EXPECT_EQ(kStoredTimeInMs,
            rtp_header.extension.video_timing.pacer_exit_delta_ms);

  fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs);
  rtp_sender_->TimeToSendPacket(kSsrc, kSeqNum, capture_time_ms, false,
                                PacedPacketInfo());

  EXPECT_EQ(2, transport_.packets_sent());
  EXPECT_EQ(packet_size, transport_.last_sent_packet().size());

  transport_.last_sent_packet().GetHeader(&rtp_header);
  EXPECT_TRUE(rtp_header.extension.has_video_timing);
  EXPECT_EQ(kStoredTimeInMs * 2,
            rtp_header.extension.video_timing.pacer_exit_delta_ms);
}

TEST_P(RtpSenderTest, TrafficSmoothingWithExtensions) {
  EXPECT_CALL(mock_paced_sender_, InsertPacket(RtpPacketSender::kNormalPriority,
                                               kSsrc, kSeqNum, _, _, _));
@@ -1410,6 +1458,33 @@ TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) {
  EXPECT_EQ(kVideoRotation_0, rotation);
}

TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) {
  uint8_t kFrame[kMaxPacketLength];
  const int64_t kPacketizationTimeMs = 100;
  const int64_t kEncodeStartDeltaMs = 10;
  const int64_t kEncodeFinishDeltaMs = 50;
  EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
                   kRtpExtensionVideoTiming, kVideoTimingExtensionId));

  const int64_t kCaptureTimestamp = fake_clock_.TimeInMilliseconds();

  RTPVideoHeader hdr = {0};
  hdr.video_timing.is_timing_frame = true;
  hdr.video_timing.encode_start_delta_ms = kEncodeStartDeltaMs;
  hdr.video_timing.encode_finish_delta_ms = kEncodeFinishDeltaMs;

  fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
  rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload,
                               kTimestamp, kCaptureTimestamp, kFrame,
                               sizeof(kFrame), nullptr, &hdr);
  VideoTiming timing;
  EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
      &timing));
  EXPECT_EQ(kPacketizationTimeMs, timing.packetization_finish_delta_ms);
  EXPECT_EQ(kEncodeStartDeltaMs, timing.encode_start_delta_ms);
  EXPECT_EQ(kEncodeFinishDeltaMs, timing.encode_finish_delta_ms);
}

TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
  uint8_t kFrame[kMaxPacketLength];
  EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
@@ -304,6 +304,7 @@ bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
  auto last_packet = rtc::MakeUnique<RtpPacketToSend>(*rtp_header);

  size_t fec_packet_overhead;
  bool is_timing_frame = false;
  bool red_enabled;
  int32_t retransmission_settings;
  {

@@ -332,6 +333,11 @@ bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
      last_packet->SetExtension<VideoContentTypeExtension>(
          video_header->content_type);
    }
    if (video_header->video_timing.is_timing_frame) {
      last_packet->SetExtension<VideoTimingExtension>(
          video_header->video_timing);
      is_timing_frame = true;
    }
  }

  // FEC settings.

@@ -388,6 +394,11 @@ bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
    if (!rtp_sender_->AssignSequenceNumber(packet.get()))
      return false;

    // Put the packetization finish timestamp into the extension.
    if (last && is_timing_frame) {
      packet->set_packetization_finish_time_ms(clock_->TimeInMilliseconds());
    }

    const bool protect_packet =
        (packetizer->GetProtectionType() == kProtectedPacket);
    if (flexfec_enabled()) {
@@ -10,8 +10,6 @@

#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"

#include <string.h>

#include "webrtc/base/logging.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_cvo.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"

@@ -253,6 +251,9 @@ bool RtpHeaderParser::Parse(RTPHeader* header,
  header->extension.hasVideoContentType = false;
  header->extension.videoContentType = VideoContentType::UNSPECIFIED;

  header->extension.has_video_timing = false;
  header->extension.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};

  if (X) {
    /* RTP header extension, RFC 3550.
     0                   1                   2                   3

@@ -464,6 +465,16 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
        }
        break;
      }
      case kRtpExtensionVideoTiming: {
        if (len != VideoTimingExtension::kValueSizeBytes - 1) {
          LOG(LS_WARNING) << "Incorrect video timing len: " << len;
          return;
        }
        header->extension.has_video_timing = true;
        VideoTimingExtension::Parse(rtc::MakeArrayView(ptr, len + 1),
                                    &header->extension.video_timing);
        break;
      }
      case kRtpExtensionRtpStreamId: {
        header->extension.stream_id.Set(rtc::MakeArrayView(ptr, len + 1));
        break;
@@ -513,6 +513,7 @@ if (rtc_include_tests) {
      "codecs/vp8/simulcast_unittest.h",
      "decoding_state_unittest.cc",
      "frame_buffer2_unittest.cc",
      "generic_encoder_unittest.cc",
      "h264_sprop_parameter_sets_unittest.cc",
      "h264_sps_pps_tracker_unittest.cc",
      "histogram_unittest.cc",
@@ -127,6 +127,9 @@ void VCMCodecDataBase::Codec(VideoCodecType codec_type, VideoCodec* settings) {
      settings->height = VCM_DEFAULT_CODEC_HEIGHT;
      settings->numberOfSimulcastStreams = 0;
      settings->qpMax = 56;
      settings->timing_frame_thresholds = {
          kDefaultTimingFramesDelayMs, kDefaultOutlierFrameSizePercent,
      };
      *(settings->VP8()) = VideoEncoder::GetDefaultVp8Settings();
      return;
    case kVideoCodecVP9:

@@ -142,6 +145,9 @@ void VCMCodecDataBase::Codec(VideoCodecType codec_type, VideoCodec* settings) {
      settings->height = VCM_DEFAULT_CODEC_HEIGHT;
      settings->numberOfSimulcastStreams = 0;
      settings->qpMax = 56;
      settings->timing_frame_thresholds = {
          kDefaultTimingFramesDelayMs, kDefaultOutlierFrameSizePercent,
      };
      *(settings->VP9()) = VideoEncoder::GetDefaultVp9Settings();
      return;
    case kVideoCodecH264:

@@ -157,6 +163,9 @@ void VCMCodecDataBase::Codec(VideoCodecType codec_type, VideoCodec* settings) {
      settings->height = VCM_DEFAULT_CODEC_HEIGHT;
      settings->numberOfSimulcastStreams = 0;
      settings->qpMax = 56;
      settings->timing_frame_thresholds = {
          kDefaultTimingFramesDelayMs, kDefaultOutlierFrameSizePercent,
      };
      *(settings->H264()) = VideoEncoder::GetDefaultH264Settings();
      return;
    case kVideoCodecI420:
@@ -21,8 +21,8 @@

#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/media/base/mediaconstants.h"
#include "webrtc/system_wrappers/include/metrics.h"

namespace webrtc {

@@ -370,6 +370,7 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
  encoded_image_.content_type_ = (mode_ == kScreensharing)
                                     ? VideoContentType::SCREENSHARE
                                     : VideoContentType::UNSPECIFIED;
  encoded_image_.timing_.is_timing_frame = false;
  encoded_image_._frameType = ConvertToVideoFrameType(info.eFrameType);

  // Split encoded image up into fragments. This also updates |encoded_image_|.
@@ -25,6 +25,7 @@
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
#include "webrtc/test/gtest.h"

using ::testing::_;

@@ -206,6 +207,8 @@ class TestVp8Simulcast : public ::testing::Test {
    settings->height = kDefaultHeight;
    settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
    ASSERT_EQ(3, kNumberOfSimulcastStreams);
    settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
                                         kDefaultOutlierFrameSizePercent};
    ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
                    kMinBitrates[0], kTargetBitrates[0],
                    &settings->simulcastStream[0], temporal_layer_profile[0]);
@@ -878,6 +878,7 @@ int VP8EncoderImpl::GetEncodedPartitions(
  encoded_images_[encoder_idx].content_type_ =
      (codec_.mode == kScreensharing) ? VideoContentType::SCREENSHARE
                                      : VideoContentType::UNSPECIFIED;
  encoded_images_[encoder_idx].timing_.is_timing_frame = false;

  int qp = -1;
  vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
@@ -29,7 +29,6 @@
#include "webrtc/base/trace_event.h"
#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"

namespace webrtc {

@@ -710,9 +709,11 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
                                : VideoContentType::UNSPECIFIED;
  encoded_image_._encodedHeight = raw_->d_h;
  encoded_image_._encodedWidth = raw_->d_w;
  encoded_image_.timing_.is_timing_frame = false;
  int qp = -1;
  vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
  encoded_image_.qp_ = qp;

  encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific,
                                             &frag_info);
}
@@ -88,6 +88,7 @@ void VCMEncodedFrame::Reset() {
  _codec = kVideoCodecUnknown;
  rotation_ = kVideoRotation_0;
  content_type_ = VideoContentType::UNSPECIFIED;
  timing_.is_timing_frame = false;
  _rotation_set = false;
}

@@ -85,6 +85,10 @@ class VCMEncodedFrame : protected EncodedImage {
   *   Get video content type
   */
  VideoContentType contentType() const { return content_type_; }
  /**
   *   Get video timing
   */
  EncodedImage::Timing video_timing() const { return timing_; }
  /**
   *   True if this frame is complete, false otherwise
   */
@@ -164,6 +164,27 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
    rotation_ = packet.video_header.rotation;
    _rotation_set = true;
    content_type_ = packet.video_header.content_type;
    if (packet.video_header.video_timing.is_timing_frame) {
      timing_.is_timing_frame = true;
      timing_.encode_start_ms =
          ntp_time_ms_ + packet.video_header.video_timing.encode_start_delta_ms;
      timing_.encode_finish_ms =
          ntp_time_ms_ +
          packet.video_header.video_timing.encode_finish_delta_ms;
      timing_.packetization_finish_ms =
          ntp_time_ms_ +
          packet.video_header.video_timing.packetization_finish_delta_ms;
      timing_.pacer_exit_ms =
          ntp_time_ms_ + packet.video_header.video_timing.pacer_exit_delta_ms;
      timing_.network_timestamp_ms =
          ntp_time_ms_ +
          packet.video_header.video_timing.network_timstamp_delta_ms;
      timing_.network2_timestamp_ms =
          ntp_time_ms_ +
          packet.video_header.video_timing.network2_timstamp_delta_ms;
    } else {
      timing_.is_timing_frame = false;
    }
  }

  if (packet.is_first_packet_in_frame) {
@@ -111,6 +111,34 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
  rotation_ = last_packet->video_header.rotation;
  _rotation_set = true;
  content_type_ = last_packet->video_header.content_type;
  if (last_packet->video_header.video_timing.is_timing_frame) {
    // ntp_time_ms_ may be -1 if not estimated yet. This is not a problem,
    // as this will be dealt with at the time of reporting.
    timing_.is_timing_frame = true;
    timing_.encode_start_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.encode_start_delta_ms;
    timing_.encode_finish_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.encode_finish_delta_ms;
    timing_.packetization_finish_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.packetization_finish_delta_ms;
    timing_.pacer_exit_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.pacer_exit_delta_ms;
    timing_.network_timestamp_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.network_timstamp_delta_ms;
    timing_.network2_timestamp_ms =
        ntp_time_ms_ +
        last_packet->video_header.video_timing.network2_timstamp_delta_ms;

    timing_.receive_start_ms = first_packet->receive_time_ms;
    timing_.receive_finish_ms = last_packet->receive_time_ms;
  } else {
    timing_.is_timing_frame = false;
  }
}

RtpFrameObject::~RtpFrameObject() {
@@ -24,7 +24,10 @@ VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming* timing,
    : _clock(clock),
      _timing(timing),
      _timestampMap(kDecoderFrameMemoryLength),
-      _lastReceivedPictureID(0) {}
+      _lastReceivedPictureID(0) {
+  ntp_offset_ =
+      _clock->CurrentNtpInMilliseconds() - _clock->TimeInMilliseconds();
+}

VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {
}

@@ -85,6 +88,30 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
  _timing->StopDecodeTimer(decodedImage.timestamp(), *decode_time_ms, now_ms,
                           frameInfo->renderTimeMs);

  // Report timing information.
  if (frameInfo->timing.is_timing_frame) {
    // Convert the remote NTP timestamps to local time.
    frameInfo->timing.encode_start_ms -= ntp_offset_;
    frameInfo->timing.encode_finish_ms -= ntp_offset_;
    frameInfo->timing.packetization_finish_ms -= ntp_offset_;
    frameInfo->timing.pacer_exit_ms -= ntp_offset_;
    frameInfo->timing.network_timestamp_ms -= ntp_offset_;
    frameInfo->timing.network2_timestamp_ms -= ntp_offset_;
    // TODO(ilnik): Report timing information here.
    // Capture time: decodedImage.ntp_time_ms() - ntp_offset
    // Encode start: frameInfo->timing.encode_start_ms
    // Encode finish: frameInfo->timing.encode_finish_ms
    // Packetization done: frameInfo->timing.packetization_finish_ms
    // Pacer exit: frameInfo->timing.pacer_exit_ms
    // Network timestamp: frameInfo->timing.network_timestamp_ms
    // Network2 timestamp: frameInfo->timing.network2_timestamp_ms
    // Receive start: frameInfo->timing.receive_start_ms
    // Receive finish: frameInfo->timing.receive_finish_ms
    // Decode start: frameInfo->decodeStartTimeMs
    // Decode finish: now_ms
    // Render time: frameInfo->renderTimeMs
  }

  decodedImage.set_timestamp_us(
      frameInfo->renderTimeMs * rtc::kNumMicrosecsPerMillisec);
  decodedImage.set_rotation(frameInfo->rotation);

@@ -151,6 +178,7 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
  _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
  _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
  _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
  _frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
  // Set correctly only for key frames. Thus, use the latest key frame's
  // content type. If the corresponding key frame was lost, decode will fail
  // and the content type will be ignored.

@@ -31,6 +31,7 @@ struct VCMFrameInformation {
  void* userData;
  VideoRotation rotation;
  VideoContentType content_type;
  EncodedImage::Timing timing;
};

class VCMDecodedFrameCallback : public DecodedImageCallback {

@@ -68,6 +69,7 @@ class VCMDecodedFrameCallback : public DecodedImageCallback {
  rtc::CriticalSection lock_;
  VCMTimestampMap _timestampMap GUARDED_BY(lock_);
  uint64_t _lastReceivedPictureID;
  int64_t ntp_offset_;
};

class VCMGenericDecoder {
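To see what the subtraction above does, with purely hypothetical numbers: the receive side reconstructed encode_start_ms as ntp_time_ms_ + encode_start_delta_ms, i.e. a timestamp on the NTP timescale. If that value is 3700000100 ms and this receiver computed ntp_offset_ = CurrentNtpInMilliseconds() - TimeInMilliseconds() = 3699000000 ms at construction, then encode_start_ms - ntp_offset_ = 1000100 ms on the local clock, directly comparable to decodeStartTimeMs and now_ms above. This assumes the sender's and receiver's NTP clocks are reasonably in sync.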
@@ -15,6 +15,7 @@
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/modules/video_coding/encoded_frame.h"
#include "webrtc/modules/video_coding/media_optimization.h"

@@ -29,7 +30,8 @@ VCMGenericEncoder::VCMGenericEncoder(
      vcm_encoded_frame_callback_(encoded_frame_callback),
      internal_source_(internal_source),
      encoder_params_({BitrateAllocation(), 0, 0, 0}),
-      is_screenshare_(false) {}
+      is_screenshare_(false),
+      streams_or_svc_num_(0) {}

VCMGenericEncoder::~VCMGenericEncoder() {}

@@ -45,6 +47,17 @@ int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
  TRACE_EVENT0("webrtc", "VCMGenericEncoder::InitEncode");
  is_screenshare_ = settings->mode == VideoCodecMode::kScreensharing;
  streams_or_svc_num_ = settings->numberOfSimulcastStreams;
  if (settings->codecType == kVideoCodecVP9) {
    streams_or_svc_num_ = settings->VP9().numberOfSpatialLayers;
  }
  if (streams_or_svc_num_ == 0)
    streams_or_svc_num_ = 1;

  vcm_encoded_frame_callback_->SetTimingFramesThresholds(
      settings->timing_frame_thresholds);
  vcm_encoded_frame_callback_->OnFrameRateChanged(settings->maxFramerate);

  if (encoder_->InitEncode(settings, number_of_cores, max_payload_size) != 0) {
    LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
                     "payload name: "

@@ -65,6 +78,8 @@ int32_t VCMGenericEncoder::Encode(const VideoFrame& frame,
  for (FrameType frame_type : frame_types)
    RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);

  for (size_t i = 0; i < streams_or_svc_num_; ++i)
    vcm_encoded_frame_callback_->OnEncodeStarted(frame.render_time_ms(), i);
  int32_t result = encoder_->Encode(frame, codec_specific, &frame_types);

  if (is_screenshare_ &&

@@ -107,6 +122,17 @@ void VCMGenericEncoder::SetEncoderParameters(const EncoderParameters& params) {
                    << ", framerate = " << params.input_frame_rate
                    << "): " << res;
    }
    vcm_encoded_frame_callback_->OnFrameRateChanged(params.input_frame_rate);
    for (size_t i = 0; i < streams_or_svc_num_; ++i) {
      size_t layer_bitrate_bytes_per_sec =
          params.target_bitrate.GetSpatialLayerSum(i) / 8;
      // VP9 rate control is not yet moved out of VP9Impl. Due to that, rates
      // are not split among spatial layers.
      if (layer_bitrate_bytes_per_sec == 0)
        layer_bitrate_bytes_per_sec = params.target_bitrate.get_sum_bps() / 8;
      vcm_encoded_frame_callback_->OnTargetBitrateChanged(
          layer_bitrate_bytes_per_sec, i);
    }
  }
}

@@ -124,6 +150,8 @@ int32_t VCMGenericEncoder::RequestFrame(
    const std::vector<FrameType>& frame_types) {
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);

  for (size_t i = 0; i < streams_or_svc_num_; ++i)
    vcm_encoded_frame_callback_->OnEncodeStarted(0, i);
  // TODO(nisse): Used only with internal source. Delete as soon as
  // that feature is removed. The only implementation I've been able
  // to find ignores what's in the frame. With one exception: It seems
|
||||
media_optimization::MediaOptimization* media_opt)
|
||||
: internal_source_(false),
|
||||
post_encode_callback_(post_encode_callback),
|
||||
media_opt_(media_opt) {}
|
||||
media_opt_(media_opt),
|
||||
framerate_(1),
|
||||
last_timing_frame_time_ms_(-1),
|
||||
timing_frames_thresholds_({-1, 0}) {}
|
||||
|
||||
VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {}
|
||||
|
||||
void VCMEncodedFrameCallback::OnTargetBitrateChanged(
|
||||
size_t bitrate_bytes_per_second,
|
||||
size_t simulcast_svc_idx) {
|
||||
rtc::CritScope crit(&timing_params_lock_);
|
||||
if (timing_frames_info_.size() < simulcast_svc_idx + 1)
|
||||
timing_frames_info_.resize(simulcast_svc_idx + 1);
|
||||
timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec =
|
||||
bitrate_bytes_per_second;
|
||||
}
|
||||
|
||||
void VCMEncodedFrameCallback::OnFrameRateChanged(size_t framerate) {
|
||||
rtc::CritScope crit(&timing_params_lock_);
|
||||
framerate_ = framerate;
|
||||
}
|
||||
|
||||
void VCMEncodedFrameCallback::OnEncodeStarted(int64_t capture_time_ms,
|
||||
size_t simulcast_svc_idx) {
|
||||
rtc::CritScope crit(&timing_params_lock_);
|
||||
if (timing_frames_info_.size() < simulcast_svc_idx + 1)
|
||||
timing_frames_info_.resize(simulcast_svc_idx + 1);
|
||||
timing_frames_info_[simulcast_svc_idx].encode_start_time_ms[capture_time_ms] =
|
||||
rtc::TimeMillis();
|
||||
}
|
||||
|
||||
EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
|
||||
const EncodedImage& encoded_image,
|
||||
const CodecSpecificInfo* codec_specific,
|
||||
const RTPFragmentationHeader* fragmentation_header) {
|
||||
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
|
||||
"timestamp", encoded_image._timeStamp);
|
||||
bool is_timing_frame = false;
|
||||
size_t outlier_frame_size = 0;
|
||||
int64_t encode_start_ms = -1;
|
||||
size_t simulcast_svc_idx = 0;
|
||||
if (codec_specific->codecType == kVideoCodecVP9) {
|
||||
if (codec_specific->codecSpecific.VP9.num_spatial_layers > 1)
|
||||
simulcast_svc_idx = codec_specific->codecSpecific.VP9.spatial_idx;
|
||||
} else if (codec_specific->codecType == kVideoCodecVP8) {
|
||||
simulcast_svc_idx = codec_specific->codecSpecific.VP8.simulcastIdx;
|
||||
} else if (codec_specific->codecType == kVideoCodecGeneric) {
|
||||
simulcast_svc_idx = codec_specific->codecSpecific.generic.simulcast_idx;
|
||||
} else if (codec_specific->codecType == kVideoCodecH264) {
|
||||
// TODO(ilnik): When h264 simulcast is landed, extract simulcast idx here.
|
||||
}
|
||||
|
||||
{
|
||||
rtc::CritScope crit(&timing_params_lock_);
|
||||
RTC_CHECK_LT(simulcast_svc_idx, timing_frames_info_.size());
|
||||
|
||||
auto encode_start_map =
|
||||
&timing_frames_info_[simulcast_svc_idx].encode_start_time_ms;
|
||||
auto it = encode_start_map->find(encoded_image.capture_time_ms_);
|
||||
if (it != encode_start_map->end()) {
|
||||
encode_start_ms = it->second;
|
||||
// Assuming all encoders do not reorder frames within single stream,
|
||||
// there may be some dropped frames with smaller timestamps. These should
|
||||
// be purged.
|
||||
encode_start_map->erase(encode_start_map->begin(), it);
|
||||
encode_start_map->erase(it);
|
||||
} else {
|
||||
// Some chromium remoting unittests use generic encoder incorrectly
|
||||
// If timestamps do not match, purge them all.
|
||||
encode_start_map->erase(encode_start_map->begin(),
|
||||
encode_start_map->end());
|
||||
}
|
||||
|
||||
int64_t timing_frame_delay_ms =
|
||||
encoded_image.capture_time_ms_ - last_timing_frame_time_ms_;
|
||||
if (last_timing_frame_time_ms_ == -1 ||
|
||||
timing_frame_delay_ms >= timing_frames_thresholds_.delay_ms ||
|
||||
timing_frame_delay_ms == 0) {
|
||||
is_timing_frame = true;
|
||||
last_timing_frame_time_ms_ = encoded_image.capture_time_ms_;
|
||||
}
|
||||
RTC_CHECK_GT(framerate_, 0);
|
||||
size_t average_frame_size =
|
||||
timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec /
|
||||
framerate_;
|
||||
outlier_frame_size = average_frame_size *
|
||||
timing_frames_thresholds_.outlier_ratio_percent / 100;
|
||||
}
|
||||
|
||||
if (encoded_image._length >= outlier_frame_size) {
|
||||
is_timing_frame = true;
|
||||
}
|
||||
if (encode_start_ms >= 0 && is_timing_frame) {
|
||||
encoded_image.SetEncodeTime(encode_start_ms, rtc::TimeMillis());
|
||||
}
|
||||
|
||||
Result result = post_encode_callback_->OnEncodedImage(
|
||||
encoded_image, codec_specific, fragmentation_header);
|
||||
if (result.error != Result::OK)
|
||||
|
||||
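The decision above combines two independent triggers: a periodic one (enough time elapsed since the last timing frame, or another layer of the same frame was already marked) and an outlier one. A minimal standalone sketch of just that predicate — names mirror the diff, the locking and per-layer bookkeeping are omitted, and framerate is assumed to be non-zero as the RTC_CHECK above enforces:

#include <cstdint>
#include <cstddef>

struct Thresholds {
  int64_t delay_ms;
  uint16_t outlier_ratio_percent;
};

// Returns true if a frame should carry timing information: either enough
// time has passed since the last timing frame, or the frame is an outlier
// relative to the ideal average frame size.
bool IsTimingFrame(int64_t capture_time_ms,
                   size_t frame_size_bytes,
                   int64_t* last_timing_frame_time_ms,
                   size_t target_bitrate_bytes_per_sec,
                   size_t framerate,
                   const Thresholds& thresholds) {
  bool timing = false;
  int64_t delay = capture_time_ms - *last_timing_frame_time_ms;
  // delay == 0 means another simulcast/SVC layer of this capture timestamp
  // was already marked; mark this one too so layers stay in lockstep.
  if (*last_timing_frame_time_ms == -1 || delay >= thresholds.delay_ms ||
      delay == 0) {
    timing = true;
    *last_timing_frame_time_ms = capture_time_ms;
  }
  size_t average = target_bitrate_bytes_per_sec / framerate;
  if (frame_size_bytes >= average * thresholds.outlier_ratio_percent / 100)
    timing = true;  // Outliers always carry timing info.
  return timing;
}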
@@ -12,6 +12,7 @@
#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_

#include <stdio.h>
#include <map>
#include <vector>

#include "webrtc/modules/video_coding/include/video_codec_interface.h"

@@ -44,14 +45,43 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info,
      const RTPFragmentationHeader* fragmentation) override;

  void SetInternalSource(bool internal_source) {
    internal_source_ = internal_source;
  }

  // Timing frames configuration methods. These 4 should be called before
  // |OnEncodedImage| at least once.
  void OnTargetBitrateChanged(size_t bitrate_bytes_per_sec,
                              size_t simulcast_svc_idx);

  void OnFrameRateChanged(size_t framerate);

  void OnEncodeStarted(int64_t capture_time_ms, size_t simulcast_svc_idx);

  void SetTimingFramesThresholds(
      const VideoCodec::TimingFrameTriggerThresholds& thresholds) {
    rtc::CritScope crit(&timing_params_lock_);
    timing_frames_thresholds_ = thresholds;
  }

 private:
  rtc::CriticalSection timing_params_lock_;
  bool internal_source_;
  EncodedImageCallback* const post_encode_callback_;
  media_optimization::MediaOptimization* const media_opt_;

  struct TimingFramesLayerInfo {
    size_t target_bitrate_bytes_per_sec = 0;
    std::map<int64_t, int64_t> encode_start_time_ms;
  };
  // Separate instance for each simulcast stream or spatial layer.
  std::vector<TimingFramesLayerInfo> timing_frames_info_
      GUARDED_BY(timing_params_lock_);
  size_t framerate_ GUARDED_BY(timing_params_lock_);
  int64_t last_timing_frame_time_ms_ GUARDED_BY(timing_params_lock_);
  VideoCodec::TimingFrameTriggerThresholds timing_frames_thresholds_
      GUARDED_BY(timing_params_lock_);
};

class VCMGenericEncoder {

@@ -88,6 +118,7 @@ class VCMGenericEncoder {
  rtc::CriticalSection params_lock_;
  EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
  bool is_screenshare_;
  size_t streams_or_svc_num_;
};

}  // namespace webrtc
webrtc/modules/video_coding/generic_encoder_unittest.cc (new file, 168 lines)
@@ -0,0 +1,168 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <vector>

#include "webrtc/modules/video_coding/encoded_frame.h"
#include "webrtc/modules/video_coding/generic_encoder.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
#include "webrtc/test/gtest.h"

namespace webrtc {
namespace test {
namespace {
inline size_t FrameSize(const size_t& min_frame_size,
                        const size_t& max_frame_size,
                        const int& s,
                        const int& i) {
  return min_frame_size + (s + 1) * i % (max_frame_size - min_frame_size);
}

class FakeEncodedImageCallback : public EncodedImageCallback {
 public:
  FakeEncodedImageCallback() : last_frame_was_timing_(false) {}
  Result OnEncodedImage(const EncodedImage& encoded_image,
                        const CodecSpecificInfo* codec_specific_info,
                        const RTPFragmentationHeader* fragmentation) override {
    last_frame_was_timing_ = encoded_image.timing_.is_timing_frame;
    return Result::OK;
  };

  bool WasTimingFrame() { return last_frame_was_timing_; }

 private:
  bool last_frame_was_timing_;
};

enum class FrameType {
  kNormal,
  kTiming,
  kDropped,
};

// Emulates |num_frames| frames on |num_streams| streams, with capture
// timestamps increasing by 1 from 0. The size of each frame is between
// |min_frame_size| and |max_frame_size|; outliers are counted relative to
// |average_frame_sizes[]| for each stream.
std::vector<std::vector<FrameType>> GetTimingFrames(
    const int64_t delay_ms,
    const size_t min_frame_size,
    const size_t max_frame_size,
    std::vector<size_t> average_frame_sizes,
    const int num_streams,
    const int num_frames) {
  FakeEncodedImageCallback sink;
  VCMEncodedFrameCallback callback(&sink, nullptr);
  const size_t kFramerate = 30;
  callback.SetTimingFramesThresholds(
      {delay_ms, kDefaultOutlierFrameSizePercent});
  callback.OnFrameRateChanged(kFramerate);
  int s, i;
  std::vector<std::vector<FrameType>> result(num_streams);
  for (s = 0; s < num_streams; ++s)
    callback.OnTargetBitrateChanged(average_frame_sizes[s] * kFramerate, s);
  int64_t current_timestamp = 0;
  for (i = 0; i < num_frames; ++i) {
    current_timestamp += 1;
    for (s = 0; s < num_streams; ++s) {
      // Every (5+s)-th frame is dropped on the s-th stream by design.
      bool dropped = i % (5 + s) == 0;

      EncodedImage image;
      CodecSpecificInfo codec_specific;
      image._length = FrameSize(min_frame_size, max_frame_size, s, i);
      image.capture_time_ms_ = current_timestamp;
      codec_specific.codecType = kVideoCodecGeneric;
      codec_specific.codecSpecific.generic.simulcast_idx = s;
      callback.OnEncodeStarted(current_timestamp, s);
      if (dropped) {
        result[s].push_back(FrameType::kDropped);
        continue;
      }
      callback.OnEncodedImage(image, &codec_specific, nullptr);
      if (sink.WasTimingFrame()) {
        result[s].push_back(FrameType::kTiming);
      } else {
        result[s].push_back(FrameType::kNormal);
      }
    }
  }
  return result;
}
}  // namespace

TEST(TestVCMEncodedFrameCallback, MarksTimingFramesPeriodicallyTogether) {
  const int64_t kDelayMs = 29;
  const size_t kMinFrameSize = 10;
  const size_t kMaxFrameSize = 20;
  const int kNumFrames = 1000;
  const int kNumStreams = 3;
  // No outliers, as 1000 is larger than anything from the range [10, 20].
  const std::vector<size_t> kAverageSize = {1000, 1000, 1000};
  auto frames = GetTimingFrames(kDelayMs, kMinFrameSize, kMaxFrameSize,
                                kAverageSize, kNumStreams, kNumFrames);
  // Timing frames should be triggered every delayMs.
  // As no outliers are expected, frames on all streams have to be
  // marked together.
  int last_timing_frame = -1;
  for (int i = 0; i < kNumFrames; ++i) {
    int num_normal = 0;
    int num_timing = 0;
    int num_dropped = 0;
    for (int s = 0; s < kNumStreams; ++s) {
      if (frames[s][i] == FrameType::kTiming) {
        ++num_timing;
      } else if (frames[s][i] == FrameType::kNormal) {
        ++num_normal;
      } else {
        ++num_dropped;
      }
    }
    // Can't have both normal and timing frames at the same timestamp.
    EXPECT_TRUE(num_timing == 0 || num_normal == 0);
    if (num_dropped < kNumStreams) {
      if (last_timing_frame == -1 || i >= last_timing_frame + kDelayMs) {
        // If there were no timing frames for a period, the current sent frame
        // has to be one. No normal frames should be sent.
        EXPECT_EQ(num_normal, 0);
      } else {
        // No unneeded timing frames should be sent.
        EXPECT_EQ(num_timing, 0);
      }
    }
    if (num_timing > 0)
      last_timing_frame = i;
  }
}

TEST(TestVCMEncodedFrameCallback, MarksOutliers) {
  const int64_t kDelayMs = 29;
  const size_t kMinFrameSize = 2495;
  const size_t kMaxFrameSize = 2505;
  const int kNumFrames = 1000;
  const int kNumStreams = 3;
  // Possible outliers, as 1000 lies in the range [995, 1005].
  const std::vector<size_t> kAverageSize = {998, 1000, 1004};
  auto frames = GetTimingFrames(kDelayMs, kMinFrameSize, kMaxFrameSize,
                                kAverageSize, kNumStreams, kNumFrames);
  // All outliers should be marked.
  for (int i = 0; i < kNumFrames; ++i) {
    for (int s = 0; s < kNumStreams; ++s) {
      if (FrameSize(kMinFrameSize, kMaxFrameSize, s, i) >=
          kAverageSize[s] * kDefaultOutlierFrameSizePercent / 100) {
        // Too-big frame. May be dropped or timing, but not normal.
        EXPECT_NE(frames[s][i], FrameType::kNormal);
      }
    }
  }
}

}  // namespace test
}  // namespace webrtc
@@ -39,7 +39,14 @@ namespace webrtc {
#define VCM_NO_FRAME_DECODED -11
#define VCM_NOT_IMPLEMENTED -20

-enum { kDefaultStartBitrateKbps = 300 };
+enum {
+  kDefaultStartBitrateKbps = 300,
+  // Timing frames settings. Timing frames are sent every
+  // |kDefaultTimingFramesDelayMs|, or if the frame is at least
+  // |kDefaultOutlierFrameSizePercent| of the average frame size.
+  kDefaultTimingFramesDelayMs = 200,
+  kDefaultOutlierFrameSizePercent = 250,
+};

enum VCMVideoProtection {
  kProtectionNone,
@@ -32,7 +32,8 @@ VCMPacket::VCMPacket()
      insertStartCode(false),
      width(0),
      height(0),
-      video_header() {
+      video_header(),
+      receive_time_ms(0) {
  video_header.playout_delay = {-1, -1};
}

@@ -47,6 +47,8 @@ class VCMPacket {
  int height;
  RTPVideoHeader video_header;

  int64_t receive_time_ms;

 protected:
  void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
};
@ -12,11 +12,12 @@

#include "webrtc/base/basictypes.h"
#include "webrtc/base/logging.h"
#include "webrtc/common_video/include/video_bitrate_allocator.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/include/video_bitrate_allocator.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
#include "webrtc/modules/video_coding/utility/default_video_bitrate_allocator.h"
#include "webrtc/system_wrappers/include/clock.h"

@ -165,6 +166,8 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
  video_codec.minBitrate = streams[0].min_bitrate_bps / 1000;
  if (video_codec.minBitrate < kEncoderMinBitrateKbps)
    video_codec.minBitrate = kEncoderMinBitrateKbps;
  video_codec.timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
                                         kDefaultOutlierFrameSizePercent};
  RTC_DCHECK_LE(streams.size(), kMaxSimulcastStreams);
  if (video_codec.codecType == kVideoCodecVP9) {
    // If the vector is empty, bitrates will be configured automatically.

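The brace-initializer above implies that |timing_frame_thresholds| is a small two-field struct on VideoCodec (zero-initialized in its constructor). A plausible shape, with hypothetical field names, since the real definition sits in common_types.h and is not shown in this diff:

#include <cstdint>

// Hypothetical sketch of the thresholds struct; field names are assumed
// from the initializer order {delay, outlier percent} above, not taken
// from the real header.
struct TimingFrameThresholds {
  // Maximum interval between two timing frames, in milliseconds.
  uint16_t delay_ms;
  // Frames at least this percent of the average size count as outliers.
  uint16_t outlier_ratio_percent;
};
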
@ -1057,6 +1057,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
      (codec_mode_ == webrtc::VideoCodecMode::kScreensharing)
          ? webrtc::VideoContentType::SCREENSHARE
          : webrtc::VideoContentType::UNSPECIFIED;
  image->timing_.is_timing_frame = false;
  image->_frameType =
      (key_frame ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta);
  image->_completeFrame = true;

@ -720,9 +720,9 @@ void H264VideoToolboxEncoder::OnEncodedFrame(
  frame.capture_time_ms_ = render_time_ms;
  frame._timeStamp = timestamp;
  frame.rotation_ = rotation;

  frame.content_type_ =
      (mode_ == kScreensharing) ? VideoContentType::SCREENSHARE
                                : VideoContentType::UNSPECIFIED;
  frame.timing_.is_timing_frame = false;

  h264_bitstream_parser_.ParseBitstream(buffer->data(), buffer->size());
  h264_bitstream_parser_.GetLastSliceQp(&frame.qp_);

@ -18,6 +18,7 @@ const int kAbsSendTimeExtensionId = 7;
const int kTransportSequenceNumberExtensionId = 8;
const int kVideoRotationExtensionId = 9;
const int kVideoContentTypeExtensionId = 10;
const int kVideoTimingExtensionId = 11;

} // namespace test
} // namespace webrtc

@ -16,5 +16,6 @@ extern const int kAbsSendTimeExtensionId;
extern const int kTransportSequenceNumberExtensionId;
extern const int kVideoRotationExtensionId;
extern const int kVideoContentTypeExtensionId;
extern const int kVideoTimingExtensionId;
} // namespace test
} // namespace webrtc

@ -148,6 +148,7 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image,
        ? VideoContentType::SCREENSHARE
        : VideoContentType::UNSPECIFIED;
    specifics.codec_name = ImplementationName();
    specifics.codecSpecific.generic.simulcast_idx = i;
    RTC_DCHECK(callback);
    if (callback->OnEncodedImage(encoded, &specifics, nullptr).error !=
        EncodedImageCallback::Result::OK) {

@ -159,7 +159,7 @@ void FrameGeneratorCapturer::InsertFrame() {
  rtc::CritScope cs(&lock_);
  if (sending_) {
    VideoFrame* frame = frame_generator_->NextFrame();
    frame->set_timestamp_us(rtc::TimeMicros());
    frame->set_timestamp_us(clock_->TimeInMicroseconds());
    frame->set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
    frame->set_rotation(fake_rotation_);
    if (first_frame_capture_time_ == -1) {

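Switching from the global rtc::TimeMicros() to the injected clock_ keeps the capturer's frame timestamps on the same time base as the rest of the pipeline, including tests driven by a simulated clock. A small illustration of why the injectable clock matters (a sketch assuming webrtc::SimulatedClock from system_wrappers; treat the exact API as an assumption):

#include "webrtc/system_wrappers/include/clock.h"

// With an injected Clock, tests can step time deterministically instead
// of depending on the wall clock behind rtc::TimeMicros().
void Demo() {
  webrtc::SimulatedClock clock(/*initial_time_us=*/1000000);
  int64_t t0 = clock.TimeInMicroseconds();  // 1000000
  clock.AdvanceTimeMilliseconds(33);        // roughly one 30 fps interval
  int64_t t1 = clock.TimeInMicroseconds();  // 1033000, fully reproducible
  (void)t0;
  (void)t1;
}
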
@ -90,6 +90,10 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
        VideoContentType content_type;
        packet.GetExtension<VideoContentTypeExtension>(&content_type);
        break;
      case kRtpExtensionVideoTiming:
        VideoTiming timing;
        packet.GetExtension<VideoTimingExtension>(&timing);
        break;
      case kRtpExtensionRtpStreamId: {
        std::string rsid;
        packet.GetExtension<RtpStreamId>(&rsid);

@ -130,6 +130,21 @@ EncodedImageCallback::Result PayloadRouter::OnEncodedImage(
  CopyCodecSpecific(codec_specific_info, &rtp_video_header);
  rtp_video_header.rotation = encoded_image.rotation_;
  rtp_video_header.content_type = encoded_image.content_type_;
  if (encoded_image.timing_.is_timing_frame) {
    rtp_video_header.video_timing.encode_start_delta_ms =
        VideoTiming::GetDeltaCappedMs(encoded_image.capture_time_ms_,
                                      encoded_image.timing_.encode_start_ms);
    rtp_video_header.video_timing.encode_finish_delta_ms =
        VideoTiming::GetDeltaCappedMs(encoded_image.capture_time_ms_,
                                      encoded_image.timing_.encode_finish_ms);
    rtp_video_header.video_timing.packetization_finish_delta_ms = 0;
    rtp_video_header.video_timing.pacer_exit_delta_ms = 0;
    rtp_video_header.video_timing.network_timstamp_delta_ms = 0;
    rtp_video_header.video_timing.network2_timstamp_delta_ms = 0;
    rtp_video_header.video_timing.is_timing_frame = true;
  } else {
    rtp_video_header.video_timing.is_timing_frame = false;
  }
  rtp_video_header.playout_delay = encoded_image.playout_delay_;

  int stream_index = rtp_video_header.simulcastIdx;

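Only the two encode deltas are known at this point; the packetization, pacer and network fields are zeroed and filled in further down the pipeline. VideoTiming::GetDeltaCappedMs (from video_timing.h) compresses each absolute timestamp into the 16-bit delta the header extension carries; a self-contained restatement of its saturating behaviour for illustration (the real helper additionally DCHECKs that time_ms >= base_ms):

#include <algorithm>
#include <cstdint>

// Equivalent of VideoTiming::GetDeltaCappedMs: deltas are measured from
// the frame's capture time and saturate at 65535 ms instead of wrapping.
uint16_t GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
  return static_cast<uint16_t>(
      std::min<int64_t>(time_ms - base_ms, UINT16_MAX));
}

// Example: capture at t = 1'000'000 ms, encode finished at t + 12 -> 12.
// A pathological 90 s stall yields 65535, not a small wrapped value.
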
@ -10,8 +10,8 @@

#include "webrtc/video/rtp_video_stream_receiver.h"

#include <vector>
#include <utility>
#include <vector>

#include "webrtc/base/checks.h"
#include "webrtc/base/location.h"
@ -239,6 +239,7 @@ int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
  VCMPacket packet(payload_data, payload_size, rtp_header_with_ntp);
  packet.timesNacked =
      nack_module_ ? nack_module_->OnReceivedPacket(packet) : -1;
  packet.receive_time_ms = clock_->TimeInMilliseconds();

  // In the case of a video stream without picture ids and no rtx the
  // RtpFrameReferenceFinder will need to know about padding to
@ -520,6 +521,11 @@ void RtpVideoStreamReceiver::NotifyReceiverOfFecPacket(
  if (header.extension.hasVideoContentType) {
    rtp_header.type.Video.content_type = header.extension.videoContentType;
  }
  rtp_header.type.Video.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
  if (header.extension.has_video_timing) {
    rtp_header.type.Video.video_timing = header.extension.video_timing;
    rtp_header.type.Video.video_timing.is_timing_frame = true;
  }
  rtp_header.type.Video.playout_delay = header.extension.playout_delay;

  OnReceivedPayloadData(nullptr, 0, &rtp_header);

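Once the deltas reach the receiver, absolute sender-clock timestamps can be recovered by adding each delta back to the frame's capture time. The actual reporting lands in a follow-up CL; below is only a hedged sketch of the arithmetic, with hypothetical names:

#include <cstdint>

#include "webrtc/api/video/video_timing.h"

// Hypothetical helper: maps received deltas back to absolute timestamps
// on the sender's clock. |capture_time_ms| must be in that same clock.
struct SenderTimestamps {
  int64_t encode_start_ms;
  int64_t encode_finish_ms;
};

SenderTimestamps ReconstructSenderTimestamps(int64_t capture_time_ms,
                                             const webrtc::VideoTiming& t) {
  return {capture_time_ms + t.encode_start_delta_ms,
          capture_time_ms + t.encode_finish_delta_ms};
}
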
@ -1327,6 +1327,8 @@ void VideoQualityTest::SetupVideo(Transport* send_transport,
  }
  video_send_config_.rtp.extensions.push_back(RtpExtension(
      RtpExtension::kVideoContentTypeUri, test::kVideoContentTypeExtensionId));
  video_send_config_.rtp.extensions.push_back(RtpExtension(
      RtpExtension::kVideoTimingUri, test::kVideoTimingExtensionId));

  video_encoder_config_.min_transmit_bitrate_bps =
      params_.video.min_transmit_bps;

@ -337,6 +337,40 @@ TEST_F(VideoSendStreamTest, SupportsVideoContentType) {
  RunBaseTest(&test);
}

TEST_F(VideoSendStreamTest, SupportsVideoTimingFrames) {
  class TimingObserver : public test::SendTest {
   public:
    TimingObserver() : SendTest(kDefaultTimeoutMs) {
      EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
          kRtpExtensionVideoTiming, test::kVideoTimingExtensionId));
    }

    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));
      if (header.extension.has_video_timing) {
        observation_complete_.Set();
      }
      return SEND_PACKET;
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      send_config->rtp.extensions.clear();
      send_config->rtp.extensions.push_back(RtpExtension(
          RtpExtension::kVideoTimingUri, test::kVideoTimingExtensionId));
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for timing frames.";
    }
  } test;

  RunBaseTest(&test);
}

class FakeReceiveStatistics : public NullReceiveStatistics {
 public:
  FakeReceiveStatistics(uint32_t send_ssrc,

@ -675,7 +675,14 @@ void ViEEncoder::OnFrame(const VideoFrame& video_frame) {
  VideoFrame incoming_frame = video_frame;

  // Local time in webrtc time base.
  int64_t current_time_ms = clock_->TimeInMilliseconds();
  int64_t current_time_us = clock_->TimeInMicroseconds();
  int64_t current_time_ms = current_time_us / rtc::kNumMicrosecsPerMillisec;
  // In some cases, e.g. when a frame from the decoder is fed back into the
  // encoder, the timestamp may be set in the future. Since the encoding
  // pipeline assumes capture time to be no later than the present, reset
  // such capture timestamps here; otherwise the RTP send stream may
  // misbehave.
  if (incoming_frame.timestamp_us() > current_time_us)
    incoming_frame.set_timestamp_us(current_time_us);

  // Capture time may come from clock with an offset and drift from clock_.
  int64_t capture_ntp_time_ms;
