Add flag enabling more packets to be retransmittable.

If not indicated otherwise, allow adding a packet to the retransmission
history at least every 1/15s in order to reduce frame dropping.

BUG=webrtc:7694

Review-Url: https://codereview.webrtc.org/2999063002
Cr-Commit-Position: refs/heads/master@{#19665}
sprang 2017-09-04 07:23:56 -07:00 committed by Commit Bot
parent 791a8f611d
commit a8ae6f2aca
17 changed files with 460 additions and 172 deletions
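Before the per-file diff, here is the gist of the new behavior in one place. This is a simplified, hedged paraphrase of RTPSenderVideo::GetStorageType() / UpdateConditionalRetransmit() shown further down, assuming the new default settings (kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers); the free-function form and parameter names below are illustrative only and are not part of the CL.

#include <cstdint>

// Sketch of the conditional retransmission rule added by this CL.
// temporal_id            - temporal layer of the frame (0 = base layer).
// ms_since_layer_frame   - time since the previous frame in this layer.
// ms_until_lower_layer   - estimated wait for the next lower-layer frame
//                          (a very large value if it cannot be estimated).
// expected_rtx_time_ms   - RTT-based estimate of a NACK round trip.
bool AllowRetransmissionSketch(int temporal_id,
                               int64_t ms_since_layer_frame,
                               int64_t ms_until_lower_layer,
                               int64_t expected_rtx_time_ms) {
  if (temporal_id <= 0)
    return true;  // Base-layer (and single-layer) packets stay retransmittable.
  // This layer has not contributed to the retransmission history for too long
  // (kMaxUnretransmittableFrameIntervalMs = 33 * 4 ms in the CL), so keep the
  // packet around for NACK handling.
  if (ms_since_layer_frame >= 33 * 4)
    return true;
  // Keep the packet if no lower-layer frame is expected before a
  // retransmission of this one could arrive anyway.
  return ms_until_lower_layer > expected_rtx_time_ms;
}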

View File

@@ -113,11 +113,16 @@ enum KeyFrameRequestMethod { kKeyFrameReqPliRtcp, kKeyFrameReqFirRtcp };
enum RtpRtcpPacketType { kPacketRtp = 0, kPacketKeepAlive = 1 };
// kConditionallyRetransmitHigherLayers allows retransmission of video frames
// in higher layers if either the last frame in that layer was too far back in
// time, or if we estimate that a new frame will be available in a lower layer
// in a shorter time than it would take to request and receive a retransmission.
enum RetransmissionMode : uint8_t {
kRetransmitOff = 0x0,
kRetransmitFECPackets = 0x1,
kRetransmitBaseLayer = 0x2,
kRetransmitHigherLayers = 0x4,
kConditionallyRetransmitHigherLayers = 0x8,
kRetransmitAllPackets = 0xFF
};
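The new mode is a bit in the existing bitmask, so callers combine it with the other RetransmissionMode values. A hypothetical configuration sketch, assuming a pointer to the RTPSenderVideo instance (its SetSelectiveRetransmissions(uint8_t) setter appears unchanged later in this diff); it simply mirrors the new default constructed in rtp_sender_video.cc below:

// Illustrative only: retransmit the base layer unconditionally, and higher
// temporal layers only when the conditional rule says it is worthwhile.
uint8_t settings = kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers;
rtp_sender_video->SetSelectiveRetransmissions(settings);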

View File

@@ -41,10 +41,6 @@ class RtpPacketizer {
// Returns true on success, false otherwise.
virtual bool NextPacket(RtpPacketToSend* packet) = 0;
-virtual ProtectionType GetProtectionType() = 0;
-virtual StorageType GetStorageType(uint32_t retransmission_settings) = 0;
virtual std::string ToString() = 0;
};

View File

@@ -389,15 +389,6 @@ void RtpPacketizerH264::NextFragmentPacket(RtpPacketToSend* rtp_packet) {
packets_.pop();
}
-ProtectionType RtpPacketizerH264::GetProtectionType() {
-return kProtectedPacket;
-}
-StorageType RtpPacketizerH264::GetStorageType(
-uint32_t retransmission_settings) {
-return kAllowRetransmission;
-}
std::string RtpPacketizerH264::ToString() {
return "RtpPacketizerH264";
}

View File

@@ -41,10 +41,6 @@ class RtpPacketizerH264 : public RtpPacketizer {
// Returns true on success, false otherwise.
bool NextPacket(RtpPacketToSend* rtp_packet) override;
-ProtectionType GetProtectionType() override;
-StorageType GetStorageType(uint32_t retransmission_settings) override;
std::string ToString() override;
private:

View File

@@ -108,15 +108,6 @@ bool RtpPacketizerGeneric::NextPacket(RtpPacketToSend* packet) {
return true;
}
-ProtectionType RtpPacketizerGeneric::GetProtectionType() {
-return kProtectedPacket;
-}
-StorageType RtpPacketizerGeneric::GetStorageType(
-uint32_t retransmission_settings) {
-return kAllowRetransmission;
-}
std::string RtpPacketizerGeneric::ToString() {
return "RtpPacketizerGeneric";
}

View File

@@ -43,10 +43,6 @@ class RtpPacketizerGeneric : public RtpPacketizer {
// Returns true on success, false otherwise.
bool NextPacket(RtpPacketToSend* packet) override;
-ProtectionType GetProtectionType() override;
-StorageType GetStorageType(uint32_t retransmission_settings) override;
std::string ToString() override;
private:

View File

@@ -211,25 +211,6 @@ bool RtpPacketizerVp8::NextPacket(RtpPacketToSend* packet) {
return true;
}
-ProtectionType RtpPacketizerVp8::GetProtectionType() {
-bool protect =
-hdr_info_.temporalIdx == 0 || hdr_info_.temporalIdx == kNoTemporalIdx;
-return protect ? kProtectedPacket : kUnprotectedPacket;
-}
-StorageType RtpPacketizerVp8::GetStorageType(uint32_t retransmission_settings) {
-if (hdr_info_.temporalIdx == 0 &&
-!(retransmission_settings & kRetransmitBaseLayer)) {
-return kDontRetransmit;
-}
-if (hdr_info_.temporalIdx != kNoTemporalIdx &&
-hdr_info_.temporalIdx > 0 &&
-!(retransmission_settings & kRetransmitHigherLayers)) {
-return kDontRetransmit;
-}
-return kAllowRetransmission;
-}
std::string RtpPacketizerVp8::ToString() {
return "RtpPacketizerVp8";
}

View File

@@ -72,10 +72,6 @@ class RtpPacketizerVp8 : public RtpPacketizer {
// Returns true on success, false otherwise.
bool NextPacket(RtpPacketToSend* packet) override;
-ProtectionType GetProtectionType() override;
-StorageType GetStorageType(uint32_t retransmission_settings) override;
std::string ToString() override;
private:

View File

@@ -472,24 +472,6 @@ RtpPacketizerVp9::RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr,
RtpPacketizerVp9::~RtpPacketizerVp9() {
}
-ProtectionType RtpPacketizerVp9::GetProtectionType() {
-bool protect =
-hdr_.temporal_idx == 0 || hdr_.temporal_idx == kNoTemporalIdx;
-return protect ? kProtectedPacket : kUnprotectedPacket;
-}
-StorageType RtpPacketizerVp9::GetStorageType(uint32_t retransmission_settings) {
-StorageType storage = kAllowRetransmission;
-if (hdr_.temporal_idx == 0 &&
-!(retransmission_settings & kRetransmitBaseLayer)) {
-storage = kDontRetransmit;
-} else if (hdr_.temporal_idx != kNoTemporalIdx && hdr_.temporal_idx > 0 &&
-!(retransmission_settings & kRetransmitHigherLayers)) {
-storage = kDontRetransmit;
-}
-return storage;
-}
std::string RtpPacketizerVp9::ToString() {
return "RtpPacketizerVp9";
}

View File

@@ -39,10 +39,6 @@ class RtpPacketizerVp9 : public RtpPacketizer {
virtual ~RtpPacketizerVp9();
-ProtectionType GetProtectionType() override;
-StorageType GetStorageType(uint32_t retransmission_settings) override;
std::string ToString() override;
// The payload data must be one encoded VP9 layer frame.

View File

@@ -580,37 +580,6 @@ TEST_F(RtpPacketizerVp9Test, TestRespectsLastPacketReductionLen) {
EXPECT_TRUE(packet.Marker());
}
-TEST_F(RtpPacketizerVp9Test, TestBaseLayerProtectionAndStorageType) {
-const size_t kFrameSize = 10;
-const size_t kPacketSize = 12;
-// I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 10 payload)
-// L: T:0, U:0, S:0, D:0
-expected_.flexible_mode = true;
-expected_.temporal_idx = 0;
-Init(kFrameSize, kPacketSize);
-EXPECT_EQ(kProtectedPacket, packetizer_->GetProtectionType());
-EXPECT_EQ(kAllowRetransmission,
-packetizer_->GetStorageType(kRetransmitBaseLayer));
-EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitOff));
-}
-TEST_F(RtpPacketizerVp9Test, TestHigherLayerProtectionAndStorageType) {
-const size_t kFrameSize = 10;
-const size_t kPacketSize = 12;
-// I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 10 payload)
-// L: T:1, U:0, S:0, D:0
-expected_.flexible_mode = true;
-expected_.temporal_idx = 1;
-Init(kFrameSize, kPacketSize);
-EXPECT_EQ(kUnprotectedPacket, packetizer_->GetProtectionType());
-EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitBaseLayer));
-EXPECT_EQ(kAllowRetransmission,
-packetizer_->GetStorageType(kRetransmitHigherLayers));
-}
class RtpDepacketizerVp9Test : public ::testing::Test {
protected:
RtpDepacketizerVp9Test()

View File

@@ -31,6 +31,7 @@ namespace {
const int64_t kRtpRtcpMaxIdleTimeProcessMs = 5;
const int64_t kRtpRtcpRttProcessTimeMs = 1000;
const int64_t kRtpRtcpBitrateProcessTimeMs = 10;
const int64_t kDefaultExpectedRetransmissionTimeMs = 125;
} // namespace
RTPExtensionType StringToRtpExtensionType(const std::string& extension) {
@@ -430,9 +431,20 @@ bool ModuleRtpRtcpImpl::SendOutgoingData(
if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
}
int64_t expected_retransmission_time_ms = rtt_ms();
if (expected_retransmission_time_ms == 0) {
// No rtt available (|kRtpRtcpRttProcessTimeMs| not yet passed?), so try to
// poll avg_rtt_ms directly from rtcp receiver.
if (rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), nullptr,
&expected_retransmission_time_ms, nullptr,
nullptr) == -1) {
expected_retransmission_time_ms = kDefaultExpectedRetransmissionTimeMs;
}
}
return rtp_sender_->SendOutgoingData(
frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
-payload_size, fragmentation, rtp_video_header, transport_frame_id_out);
payload_size, fragmentation, rtp_video_header, transport_frame_id_out,
expected_retransmission_time_ms);
}
bool ModuleRtpRtcpImpl::TimeToSendPacket(uint32_t ssrc,

View File

@@ -232,7 +232,7 @@ int32_t RTPSender::RegisterPayload(
if (payload_type_map_.end() != it) {
// We already use this payload type.
RtpUtility::Payload* payload = it->second;
-assert(payload);
RTC_DCHECK(payload);
// Check if it's the same as we already have.
if (RtpUtility::StringCompare(
@@ -355,7 +355,7 @@ int32_t RTPSender::CheckPayloadType(int8_t payload_type,
}
SetSendPayloadType(payload_type);
RtpUtility::Payload* payload = it->second;
-assert(payload);
RTC_DCHECK(payload);
if (!payload->audio && !audio_configured_) {
video_->SetVideoCodecType(payload->typeSpecific.Video.videoCodecType);
*video_type = payload->typeSpecific.Video.videoCodecType;
@@ -371,7 +371,8 @@ bool RTPSender::SendOutgoingData(FrameType frame_type,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_header,
-uint32_t* transport_frame_id_out) {
uint32_t* transport_frame_id_out,
int64_t expected_retransmission_time_ms) {
uint32_t ssrc;
uint16_t sequence_number;
uint32_t rtp_timestamp;
@@ -395,20 +396,29 @@ bool RTPSender::SendOutgoingData(FrameType frame_type,
return false;
}
switch (frame_type) {
case kAudioFrameSpeech:
case kAudioFrameCN:
RTC_CHECK(audio_configured_);
break;
case kVideoFrameKey:
case kVideoFrameDelta:
RTC_CHECK(!audio_configured_);
break;
case kEmptyFrame:
break;
}
bool result;
if (audio_configured_) {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", rtp_timestamp, "Send", "type",
FrameTypeToString(frame_type));
-assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
-frame_type == kEmptyFrame);
result = audio_->SendAudio(frame_type, payload_type, rtp_timestamp,
payload_data, payload_size, fragmentation);
} else {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms,
"Send", "type", FrameTypeToString(frame_type));
-assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
if (frame_type == kEmptyFrame)
return true;
@@ -419,7 +429,8 @@ bool RTPSender::SendOutgoingData(FrameType frame_type,
result = video_->SendVideo(video_type, frame_type, payload_type,
rtp_timestamp, capture_time_ms, payload_data,
-payload_size, fragmentation, rtp_header);
payload_size, fragmentation, rtp_header,
expected_retransmission_time_ms);
}
rtc::CritScope cs(&statistics_crit_);
@@ -1105,7 +1116,7 @@ rtc::Optional<uint32_t> RTPSender::FlexfecSsrc() const {
}
void RTPSender::SetCsrcs(const std::vector<uint32_t>& csrcs) {
-assert(csrcs.size() <= kRtpCsrcSize);
RTC_DCHECK_LE(csrcs.size(), kRtpCsrcSize);
rtc::CritScope lock(&send_critsect_);
csrcs_ = csrcs;
}
@@ -1136,7 +1147,7 @@ int32_t RTPSender::SetAudioLevel(uint8_t level_d_bov) {
}
RtpVideoCodecTypes RTPSender::VideoCodecType() const {
-assert(!audio_configured_ && "Sender is an audio stream!");
RTC_DCHECK(!audio_configured_) << "Sender is an audio stream!";
return video_->VideoCodecType();
}

View File

@@ -108,7 +108,8 @@ class RTPSender {
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_header,
-uint32_t* transport_frame_id_out);
uint32_t* transport_frame_id_out,
int64_t expected_retransmission_time_ms);
// RTP header extension
int32_t RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);

View File

@@ -25,7 +25,9 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_sender.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_video.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/rtc_base/arraysize.h"
#include "webrtc/rtc_base/buffer.h"
#include "webrtc/rtc_base/ptr_util.h"
#include "webrtc/rtc_base/rate_limiter.h"
#include "webrtc/test/field_trial.h"
#include "webrtc/test/gmock.h"
@@ -55,6 +57,7 @@ const size_t kMaxPaddingSize = 224u;
const int kVideoRotationExtensionId = 5;
const size_t kGenericHeaderLength = 1;
const uint8_t kPayloadData[] = {47, 11, 32, 93, 89};
const int64_t kDefaultExpectedRetransmissionTimeMs = 125;
using ::testing::_;
using ::testing::ElementsAreArray;
@@ -238,7 +241,8 @@ class RtpSenderTest : public ::testing::TestWithParam<bool> {
EXPECT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayloadData,
-sizeof(kPayloadData), nullptr, nullptr, nullptr));
sizeof(kPayloadData), nullptr, nullptr, nullptr,
kDefaultExpectedRetransmissionTimeMs));
}
};
@@ -249,15 +253,32 @@ class RtpSenderTestWithoutPacer : public RtpSenderTest {
void SetUp() override { SetUpRtpSender(false); }
};
class TestRtpSenderVideo : public RTPSenderVideo {
public:
TestRtpSenderVideo(Clock* clock,
RTPSender* rtp_sender,
FlexfecSender* flexfec_sender)
: RTPSenderVideo(clock, rtp_sender, flexfec_sender) {}
~TestRtpSenderVideo() override {}
StorageType GetStorageType(const RTPVideoHeader& header,
int32_t retransmission_settings,
int64_t expected_retransmission_time_ms) {
return RTPSenderVideo::GetStorageType(GetTemporalId(header),
retransmission_settings,
expected_retransmission_time_ms);
}
};
class RtpSenderVideoTest : public RtpSenderTest {
protected:
void SetUp() override {
// TODO(pbos): Set up to use pacer.
SetUpRtpSender(false);
rtp_sender_video_.reset(
-new RTPSenderVideo(&fake_clock_, rtp_sender_.get(), nullptr));
new TestRtpSenderVideo(&fake_clock_, rtp_sender_.get(), nullptr));
}
-std::unique_ptr<RTPSenderVideo> rtp_sender_video_;
std::unique_ptr<TestRtpSenderVideo> rtp_sender_video_;
};
TEST_P(RtpSenderTestWithoutPacer, AllocatePacketSetCsrc) {
@@ -861,9 +882,9 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) {
uint8_t payload[] = {47, 11, 32, 93, 89};
// Send keyframe
-ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
-4321, payload, sizeof(payload),
-nullptr, nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
auto sent_payload = transport_.last_sent_packet().payload();
uint8_t generic_header = sent_payload[0];
@@ -878,7 +899,7 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) {
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
-nullptr, nullptr, nullptr));
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
sent_payload = transport_.last_sent_packet().payload();
generic_header = sent_payload[0];
@@ -998,7 +1019,8 @@ TEST_P(RtpSenderTest, NoFlexfecForTimingFrames) {
video_header.video_timing.flags = TimingFrameFlags::kTriggeredByTimer;
EXPECT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayloadData,
-sizeof(kPayloadData), nullptr, &video_header, nullptr));
sizeof(kPayloadData), nullptr, &video_header, nullptr,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_CALL(mock_rtc_event_log_,
LogRtpHeader(PacketDirection::kOutgoingPacket, _, _, _))
@@ -1023,7 +1045,8 @@ TEST_P(RtpSenderTest, NoFlexfecForTimingFrames) {
video_header.video_timing.flags = TimingFrameFlags::kInvalid;
EXPECT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, kPayloadType, kTimestamp + 1, kCaptureTimeMs + 1,
-kPayloadData, sizeof(kPayloadData), nullptr, &video_header, nullptr));
kPayloadData, sizeof(kPayloadData), nullptr, &video_header, nullptr,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_CALL(mock_rtc_event_log_,
LogRtpHeader(PacketDirection::kOutgoingPacket, _, _, _))
@@ -1168,9 +1191,9 @@ TEST_P(RtpSenderTest, FrameCountCallbacks) {
EXPECT_CALL(mock_paced_sender_, InsertPacket(_, _, _, _, _, _))
.Times(::testing::AtLeast(2));
-ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
-4321, payload, sizeof(payload),
-nullptr, nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(1U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
@@ -1179,7 +1202,7 @@ TEST_P(RtpSenderTest, FrameCountCallbacks) {
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
-nullptr, nullptr, nullptr));
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(2U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
@@ -1245,7 +1268,7 @@ TEST_P(RtpSenderTest, BitrateCallbacks) {
for (uint32_t i = 0; i < kNumPackets; ++i) {
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
-nullptr, nullptr, nullptr));
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
fake_clock_.AdvanceTimeMilliseconds(kPacketInterval);
}
@@ -1327,8 +1350,8 @@ TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
// Send a frame.
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-kVideoFrameKey, payload_type, 1234, 4321, payload,
-sizeof(payload), nullptr, nullptr, nullptr));
kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
StreamDataCounters expected;
expected.transmitted.payload_bytes = 6;
expected.transmitted.header_bytes = 12;
@@ -1369,8 +1392,8 @@ TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
fec_params.max_fec_frames = 1;
rtp_sender_->SetFecParameters(fec_params, fec_params);
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-kVideoFrameDelta, payload_type, 1234, 4321, payload,
-sizeof(payload), nullptr, nullptr, nullptr));
kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
expected.transmitted.payload_bytes = 40;
expected.transmitted.header_bytes = 60;
expected.transmitted.packets = 5;
@@ -1388,8 +1411,8 @@ TEST_P(RtpSenderAudioTest, SendAudio) {
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-kAudioFrameCN, payload_type, 1234, 4321, payload,
-sizeof(payload), nullptr, nullptr, nullptr));
kAudioFrameCN, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
auto sent_payload = transport_.last_sent_packet().payload();
EXPECT_THAT(sent_payload, ElementsAreArray(payload));
@@ -1407,8 +1430,8 @@ TEST_P(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-kAudioFrameCN, payload_type, 1234, 4321, payload,
-sizeof(payload), nullptr, nullptr, nullptr));
kAudioFrameCN, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
auto sent_payload = transport_.last_sent_packet().payload();
EXPECT_THAT(sent_payload, ElementsAreArray(payload));
@@ -1445,22 +1468,22 @@ TEST_P(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
// During start, it takes the starting timestamp as last sent timestamp.
// The duration is calculated as the difference of current and last sent
// timestamp. So for first call it will skip since the duration is zero.
-ASSERT_TRUE(rtp_sender_->SendOutgoingData(kEmptyFrame, kPayloadType,
-capture_time_ms, 0, nullptr, 0,
-nullptr, nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kEmptyFrame, kPayloadType, capture_time_ms, 0, nullptr, 0, nullptr,
nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
// DTMF Sample Length is (Frequency/1000) * Duration.
// So in this case, it is (8000/1000) * 500 = 4000.
// Sending it as two packets.
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-kEmptyFrame, kPayloadType, capture_time_ms + 2000, 0,
-nullptr, 0, nullptr, nullptr, nullptr));
kEmptyFrame, kPayloadType, capture_time_ms + 2000, 0, nullptr, 0, nullptr,
nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
// Marker Bit should be set to 1 for first packet.
EXPECT_TRUE(transport_.last_sent_packet().Marker());
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-kEmptyFrame, kPayloadType, capture_time_ms + 4000, 0,
-nullptr, 0, nullptr, nullptr, nullptr));
kEmptyFrame, kPayloadType, capture_time_ms + 4000, 0, nullptr, 0, nullptr,
nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
// Marker Bit should be set to 0 for rest of the packets.
EXPECT_FALSE(transport_.last_sent_packet().Marker());
}
@@ -1478,8 +1501,8 @@ TEST_P(RtpSenderTestWithoutPacer, BytesReportedCorrectly) {
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-kVideoFrameKey, kPayloadType, 1234, 4321, payload,
-sizeof(payload), nullptr, nullptr, nullptr));
kVideoFrameKey, kPayloadType, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs));
// Will send 2 full-size padding packets.
rtp_sender_->TimeToSendPadding(1, PacedPacketInfo());
@@ -1553,7 +1576,7 @@ TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) {
hdr.rotation = kVideoRotation_0;
rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload,
kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
-&hdr);
&hdr, kDefaultExpectedRetransmissionTimeMs);
VideoRotation rotation;
EXPECT_TRUE(
@@ -1579,7 +1602,8 @@ TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) {
fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload,
kTimestamp, kCaptureTimestamp, kFrame,
-sizeof(kFrame), nullptr, &hdr);
sizeof(kFrame), nullptr, &hdr,
kDefaultExpectedRetransmissionTimeMs);
VideoSendTiming timing;
EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
&timing));
@@ -1595,14 +1619,14 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
RTPVideoHeader hdr = {0};
hdr.rotation = kVideoRotation_90;
-EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey,
-kPayload, kTimestamp, 0, kFrame,
-sizeof(kFrame), nullptr, &hdr));
EXPECT_TRUE(rtp_sender_video_->SendVideo(
kRtpVideoGeneric, kVideoFrameKey, kPayload, kTimestamp, 0, kFrame,
sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
hdr.rotation = kVideoRotation_0;
-EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameDelta,
-kPayload, kTimestamp + 1, 0, kFrame,
-sizeof(kFrame), nullptr, &hdr));
EXPECT_TRUE(rtp_sender_video_->SendVideo(
kRtpVideoGeneric, kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame,
sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
EXPECT_TRUE(
@@ -1617,13 +1641,13 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
RTPVideoHeader hdr = {0};
hdr.rotation = kVideoRotation_90;
-EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey,
-kPayload, kTimestamp, 0, kFrame,
-sizeof(kFrame), nullptr, &hdr));
EXPECT_TRUE(rtp_sender_video_->SendVideo(
kRtpVideoGeneric, kVideoFrameKey, kPayload, kTimestamp, 0, kFrame,
sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
-EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameDelta,
-kPayload, kTimestamp + 1, 0, kFrame,
-sizeof(kFrame), nullptr, &hdr));
EXPECT_TRUE(rtp_sender_video_->SendVideo(
kRtpVideoGeneric, kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame,
sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
EXPECT_TRUE(
@@ -1652,6 +1676,224 @@ TEST_P(RtpSenderVideoTest, SendVideoWithCameraAndFlipCVO) {
ConvertCVOByteToVideoRotation(flip_bit | camera_bit | 3));
}
TEST_P(RtpSenderVideoTest, RetransmissionTypesGeneric) {
RTPVideoHeader header;
header.codec = kRtpVideoGeneric;
EXPECT_EQ(kDontRetransmit,
rtp_sender_video_->GetStorageType(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission,
rtp_sender_video_->GetStorageType(
header, kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitAllPackets,
kDefaultExpectedRetransmissionTimeMs));
}
TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
RTPVideoHeader header;
header.codec = kRtpVideoH264;
header.codecHeader.H264.packetization_mode =
H264PacketizationMode::NonInterleaved;
EXPECT_EQ(kDontRetransmit,
rtp_sender_video_->GetStorageType(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission,
rtp_sender_video_->GetStorageType(
header, kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitAllPackets,
kDefaultExpectedRetransmissionTimeMs));
}
TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) {
RTPVideoHeader header;
header.codec = kRtpVideoVp8;
header.codecHeader.VP8.temporalIdx = 0;
EXPECT_EQ(kDontRetransmit,
rtp_sender_video_->GetStorageType(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission,
rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers | kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(
kAllowRetransmission,
rtp_sender_video_->GetStorageType(
header, kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitAllPackets,
kDefaultExpectedRetransmissionTimeMs));
}
TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8HigherLayers) {
RTPVideoHeader header;
header.codec = kRtpVideoVp8;
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
header.codecHeader.VP8.temporalIdx = tid;
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitOff,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission,
rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers | kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitAllPackets,
kDefaultExpectedRetransmissionTimeMs));
}
}
TEST_P(RtpSenderVideoTest, RetransmissionTypesVP9) {
RTPVideoHeader header;
header.codec = kRtpVideoVp9;
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
header.codecHeader.VP9.temporal_idx = tid;
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitOff,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
header, kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission,
rtp_sender_video_->GetStorageType(
header, kRetransmitHigherLayers | kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_EQ(kAllowRetransmission, rtp_sender_video_->GetStorageType(
header, kRetransmitAllPackets,
kDefaultExpectedRetransmissionTimeMs));
}
}
TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
const int64_t kFrameIntervalMs = 33;
const int64_t kRttMs = (kFrameIntervalMs * 3) / 2;
const uint8_t kSettings =
kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers;
// Insert VP8 frames for all temporal layers, but stop before the final index.
RTPVideoHeader header;
header.codec = kRtpVideoVp8;
// Fill averaging window to prevent rounding errors.
constexpr int kNumRepetitions =
(RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
kFrameIntervalMs;
constexpr int kPattern[] = {0, 2, 1, 2};
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
header.codecHeader.VP8.temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
}
// Since we're at the start of the pattern, the next expected frame in TL0 is
// right now. We will wait at most one expected retransmission time before
// acknowledging that it did not arrive, which means this frame and the next
// will not be retransmitted.
header.codecHeader.VP8.temporalIdx = 1;
EXPECT_EQ(StorageType::kDontRetransmit,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
EXPECT_EQ(StorageType::kDontRetransmit,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// The TL0 frame did not arrive. So allow retransmission.
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// Insert a frame for TL2. We just had frame in TL1, so the next one there is
// in three frames away. TL0 is still too far in the past. So, allow
// retransmission.
header.codecHeader.VP8.temporalIdx = 2;
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// Another TL2, next in TL1 is two frames away. Allow again.
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// Yet another TL2, next in TL1 is now only one frame away, so don't store
// for retransmission.
EXPECT_EQ(StorageType::kDontRetransmit,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
}
TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
const int64_t kFrameIntervalMs = 200;
const int64_t kRttMs = (kFrameIntervalMs * 3) / 2;
const int32_t kSettings =
kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers;
// Insert VP8 frames for all temporal layers, but stop before the final index.
RTPVideoHeader header;
header.codec = kRtpVideoVp8;
// Fill averaging window to prevent rounding errors.
constexpr int kNumRepetitions =
(RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
kFrameIntervalMs;
constexpr int kPattern[] = {0, 2, 2, 2};
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
header.codecHeader.VP8.temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
}
// Since we're at the start of the pattern, the next expected frame will be
// right now in TL0. Put it in TL1 instead. Regular rules would dictate that
// we don't store for retransmission because we expect a frame in a lower
// layer, but that last frame in TL1 was a long time ago in absolute terms,
// so allow retransmission anyway.
header.codecHeader.VP8.temporalIdx = 1;
EXPECT_EQ(StorageType::kAllowRetransmission,
rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
}
TEST_P(RtpSenderTest, OnOverheadChanged) {
MockOverheadObserver mock_overhead_observer;
rtp_sender_.reset(

View File

@@ -13,9 +13,10 @@
#include <stdlib.h>
#include <string.h>
#include <limits>
#include <memory>
-#include <vector>
#include <utility>
#include <vector>
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
@@ -33,6 +34,7 @@ namespace webrtc {
namespace {
constexpr size_t kRedForFecHeaderLength = 1;
constexpr int64_t kMaxUnretransmittableFrameIntervalMs = 33 * 4;
void BuildRedPayload(const RtpPacketToSend& media_packet,
RtpPacketToSend* red_packet) {
@@ -53,7 +55,8 @@ RTPSenderVideo::RTPSenderVideo(Clock* clock,
: rtp_sender_(rtp_sender),
clock_(clock),
video_type_(kRtpVideoGeneric),
-retransmission_settings_(kRetransmitBaseLayer),
retransmission_settings_(kRetransmitBaseLayer |
kConditionallyRetransmitHigherLayers),
last_rotation_(kVideoRotation_0),
red_payload_type_(-1),
ulpfec_payload_type_(-1),
@@ -292,7 +295,8 @@ bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
-const RTPVideoHeader* video_header) {
const RTPVideoHeader* video_header,
int64_t expected_retransmission_time_ms) {
if (payload_size == 0)
return false;
@@ -365,8 +369,11 @@ bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
video_type, max_data_payload_length, last_packet_reduction_len,
video_header ? &(video_header->codecHeader) : nullptr, frame_type));
-// Media packet storage.
-StorageType storage = packetizer->GetStorageType(retransmission_settings);
const uint8_t temporal_id =
video_header ? GetTemporalId(*video_header) : kNoTemporalIdx;
StorageType storage = GetStorageType(temporal_id, retransmission_settings,
expected_retransmission_time_ms);
// TODO(changbin): we currently don't support to configure the codec to
// output multiple partitions for VP8. Should remove below check after the
@@ -392,7 +399,9 @@ bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
if (!rtp_sender_->AssignSequenceNumber(packet.get()))
return false;
-bool protect_packet = (packetizer->GetProtectionType() == kProtectedPacket);
// No FEC protection for upper temporal layers, if used.
bool protect_packet = temporal_id == 0 || temporal_id == kNoTemporalIdx;
// Put packetization finish timestamp into extension.
if (packet->HasExtension<VideoTimingExtension>()) {
packet->set_packetization_finish_time_ms(clock_->TimeInMilliseconds());
@@ -453,4 +462,90 @@ void RTPSenderVideo::SetSelectiveRetransmissions(uint8_t settings) {
retransmission_settings_ = settings;
}
StorageType RTPSenderVideo::GetStorageType(
uint8_t temporal_id,
int32_t retransmission_settings,
int64_t expected_retransmission_time_ms) {
if (retransmission_settings == kRetransmitOff)
return StorageType::kDontRetransmit;
if (retransmission_settings == kRetransmitAllPackets)
return StorageType::kAllowRetransmission;
rtc::CritScope cs(&stats_crit_);
// Media packet storage.
if ((retransmission_settings & kConditionallyRetransmitHigherLayers) &&
UpdateConditionalRetransmit(temporal_id,
expected_retransmission_time_ms)) {
retransmission_settings |= kRetransmitHigherLayers;
}
if (temporal_id == kNoTemporalIdx)
return kAllowRetransmission;
if ((retransmission_settings & kRetransmitBaseLayer) && temporal_id == 0)
return kAllowRetransmission;
if ((retransmission_settings & kRetransmitHigherLayers) && temporal_id > 0)
return kAllowRetransmission;
return kDontRetransmit;
}
uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
switch (header.codec) {
case kRtpVideoVp8:
return header.codecHeader.VP8.temporalIdx;
case kRtpVideoVp9:
return header.codecHeader.VP9.temporal_idx;
default:
return kNoTemporalIdx;
}
}
bool RTPSenderVideo::UpdateConditionalRetransmit(
uint8_t temporal_id,
int64_t expected_retransmission_time_ms) {
int64_t now_ms = clock_->TimeInMilliseconds();
// Update stats for any temporal layer.
TemporalLayerStats* current_layer_stats =
&frame_stats_by_temporal_layer_[temporal_id];
current_layer_stats->frame_rate_fp1000s.Update(1, now_ms);
int64_t tl_frame_interval = now_ms - current_layer_stats->last_frame_time_ms;
current_layer_stats->last_frame_time_ms = now_ms;
// Conditional retransmit only applies to upper layers.
if (temporal_id != kNoTemporalIdx && temporal_id > 0) {
if (tl_frame_interval >= kMaxUnretransmittableFrameIntervalMs) {
// Too long since a retransmittable frame in this layer, enable NACK
// protection.
return true;
} else {
// Estimate when the next frame of any lower layer will be sent.
const int64_t kUndefined = std::numeric_limits<int64_t>::max();
int64_t expected_next_frame_time = kUndefined;
for (int i = temporal_id - 1; i >= 0; --i) {
TemporalLayerStats* stats = &frame_stats_by_temporal_layer_[i];
rtc::Optional<uint32_t> rate = stats->frame_rate_fp1000s.Rate(now_ms);
if (rate) {
int64_t tl_next = stats->last_frame_time_ms + 1000000 / *rate;
if (tl_next - now_ms > -expected_retransmission_time_ms &&
tl_next < expected_next_frame_time) {
expected_next_frame_time = tl_next;
}
}
}
if (expected_next_frame_time == kUndefined ||
expected_next_frame_time - now_ms > expected_retransmission_time_ms) {
// The next frame in a lower layer is expected at a later time (or
// unable to tell due to lack of data) than a retransmission is
// estimated to be able to arrive, so allow this packet to be nacked.
return true;
}
}
}
return false;
}
} // namespace webrtc
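A quick note on the fixed-point frame rate used above: frame_rate_fp1000s counts frames per 1000 seconds (see the TemporalLayerStats comment in rtp_sender_video.h below), so UpdateConditionalRetransmit() converts a measured rate back to an expected inter-frame gap of 1000000 / rate milliseconds. A small illustrative calculation (the values are chosen for the example, not taken from the CL):

#include <cstdint>
#include <cstdio>

int main() {
  // A 30 fps layer measured over the rate window reports ~30000 frames per
  // 1000 seconds; 1000000 / 30000 = 33 ms between frames.
  const uint32_t rate_30fps_fp1000s = 30000;
  // A layer receiving every fourth frame of that stream (7.5 fps) reports
  // ~7500, i.e. an expected gap of 1000000 / 7500 = 133 ms.
  const uint32_t rate_tl0_fp1000s = 7500;
  std::printf("30 fps  -> %lld ms\n",
              static_cast<long long>(1000000 / rate_30fps_fp1000s));
  std::printf("7.5 fps -> %lld ms\n",
              static_cast<long long>(1000000 / rate_tl0_fp1000s));
  return 0;
}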

View File

@@ -11,9 +11,8 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_SENDER_VIDEO_H_
-#include <list>
#include <map>
#include <memory>
-#include <vector>
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/include/flexfec_sender.h"
@@ -32,10 +31,13 @@
#include "webrtc/typedefs.h"
namespace webrtc {
-class RtpPacketizer;
class RtpPacketToSend;
class RTPSenderVideo {
public:
static constexpr int64_t kTLRateWindowSizeMs = 2500;
RTPSenderVideo(Clock* clock,
RTPSender* rtpSender,
FlexfecSender* flexfec_sender);
@@ -55,7 +57,8 @@ class RTPSenderVideo {
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
-const RTPVideoHeader* video_header);
const RTPVideoHeader* video_header,
int64_t expected_retransmission_time_ms);
void SetVideoCodecType(RtpVideoCodecTypes type);
@@ -76,7 +79,24 @@ class RTPSenderVideo {
int SelectiveRetransmissions() const;
void SetSelectiveRetransmissions(uint8_t settings);
protected:
static uint8_t GetTemporalId(const RTPVideoHeader& header);
StorageType GetStorageType(uint8_t temporal_id,
int32_t retransmission_settings,
int64_t expected_retransmission_time_ms);
private:
struct TemporalLayerStats {
TemporalLayerStats()
: frame_rate_fp1000s(kTLRateWindowSizeMs, 1000 * 1000),
last_frame_time_ms(0) {}
// Frame rate, in frames per 1000 seconds. This essentially turns the fps
// value into a fixed point value with three decimals. Improves precision at
// low frame rates.
RateStatistics frame_rate_fp1000s;
int64_t last_frame_time_ms;
};
size_t CalculateFecPacketOverhead() const EXCLUSIVE_LOCKS_REQUIRED(crit_);
void SendVideoPacket(std::unique_ptr<RtpPacketToSend> packet,
@@ -103,6 +123,10 @@ class RTPSenderVideo {
bool flexfec_enabled() const { return flexfec_sender_ != nullptr; }
bool UpdateConditionalRetransmit(uint8_t temporal_id,
int64_t expected_retransmission_time_ms)
EXCLUSIVE_LOCKS_REQUIRED(stats_crit_);
RTPSender* const rtp_sender_;
Clock* const clock_;
@@ -131,6 +155,10 @@ class RTPSenderVideo {
RateStatistics fec_bitrate_ GUARDED_BY(stats_crit_);
// Bitrate used for video payload and RTP headers.
RateStatistics video_bitrate_ GUARDED_BY(stats_crit_);
std::map<int, TemporalLayerStats> frame_stats_by_temporal_layer_
GUARDED_BY(stats_crit_);
OneTimeEvent first_frame_sent_;
};