Send estimated capture clock offset when sending Abs-capture-time RTP header extension.

Bug: webrtc:10739
Change-Id: I4e3c46c749b9907ae9d212651b564add91c56958
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/182004
Commit-Queue: Minyue Li <minyue@webrtc.org>
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Reviewed-by: Chen Xing <chxg@google.com>
Cr-Commit-Position: refs/heads/master@{#31973}
This commit is contained in:
Minyue Li 2020-08-20 16:18:31 +02:00 committed by Commit Bot
parent 0796b58a7e
commit e64b3d0159
6 changed files with 238 additions and 125 deletions

View File

@ -49,12 +49,18 @@ const char* FrameTypeToString(AudioFrameType frame_type) {
}
#endif
constexpr char kIncludeCaptureClockOffset[] =
"WebRTC-IncludeCaptureClockOffset";
} // namespace
RTPSenderAudio::RTPSenderAudio(Clock* clock, RTPSender* rtp_sender)
: clock_(clock),
rtp_sender_(rtp_sender),
absolute_capture_time_sender_(clock) {
absolute_capture_time_sender_(clock),
include_capture_clock_offset_(
absl::StartsWith(field_trials_.Lookup(kIncludeCaptureClockOffset),
"Enabled")) {
RTC_DCHECK(clock_);
}
@ -280,7 +286,8 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type,
// absolute capture time sending.
encoder_rtp_timestamp_frequency.value_or(0),
Int64MsToUQ32x32(absolute_capture_timestamp_ms + NtpOffsetMs()),
/*estimated_capture_clock_offset=*/absl::nullopt);
/*estimated_capture_clock_offset=*/
include_capture_clock_offset_ ? absl::make_optional(0) : absl::nullopt);
if (absolute_capture_time) {
// It also checks that extension was registered during SDP negotiation. If
// not then setter won't do anything.

View File

@ -17,6 +17,7 @@
#include <memory>
#include "absl/strings/string_view.h"
#include "api/transport/field_trial_based_config.h"
#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
#include "modules/rtp_rtcp/source/dtmf_queue.h"
@ -106,6 +107,9 @@ class RTPSenderAudio {
AbsoluteCaptureTimeSender absolute_capture_time_sender_;
const FieldTrialBasedConfig field_trials_;
const bool include_capture_clock_offset_;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RTPSenderAudio);
};

View File

@ -20,6 +20,7 @@
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "modules/rtp_rtcp/source/time_util.h"
#include "test/field_trial.h"
#include "test/gmock.h"
#include "test/gtest.h"
@ -76,48 +77,50 @@ class RtpSenderAudioTest : public ::testing::Test {
config.local_media_ssrc = kSsrc;
return config;
}())),
rtp_sender_audio_(&fake_clock_, rtp_module_->RtpSender()) {
rtp_sender_audio_(
std::make_unique<RTPSenderAudio>(&fake_clock_,
rtp_module_->RtpSender())) {
rtp_module_->SetSequenceNumber(kSeqNum);
}
SimulatedClock fake_clock_;
LoopbackTransportTest transport_;
std::unique_ptr<ModuleRtpRtcpImpl2> rtp_module_;
RTPSenderAudio rtp_sender_audio_;
std::unique_ptr<RTPSenderAudio> rtp_sender_audio_;
};
TEST_F(RtpSenderAudioTest, SendAudio) {
const char payload_name[] = "PAYLOAD_NAME";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload(
ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
payload_name, payload_type, 48000, 0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kAudioFrameCN,
payload_type, 4321, payload,
sizeof(payload),
/*absolute_capture_timestamp_ms=*/0));
ASSERT_TRUE(
rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type,
4321, payload, sizeof(payload),
/*absolute_capture_timestamp_ms=*/0));
auto sent_payload = transport_.last_sent_packet().payload();
EXPECT_THAT(sent_payload, ElementsAreArray(payload));
}
TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
EXPECT_EQ(0, rtp_sender_audio_.SetAudioLevel(kAudioLevel));
EXPECT_EQ(0, rtp_sender_audio_->SetAudioLevel(kAudioLevel));
rtp_module_->RegisterRtpHeaderExtension(AudioLevel::kUri,
kAudioLevelExtensionId);
const char payload_name[] = "PAYLOAD_NAME";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload(
ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
payload_name, payload_type, 48000, 0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kAudioFrameCN,
payload_type, 4321, payload,
sizeof(payload),
/*absolute_capture_timestamp_ms=*/0));
ASSERT_TRUE(
rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type,
4321, payload, sizeof(payload),
/*absolute_capture_timestamp_ms=*/0));
auto sent_payload = transport_.last_sent_packet().payload();
EXPECT_THAT(sent_payload, ElementsAreArray(payload));
@ -134,11 +137,11 @@ TEST_F(RtpSenderAudioTest, SendAudioWithoutAbsoluteCaptureTime) {
constexpr uint32_t kAbsoluteCaptureTimestampMs = 521;
const char payload_name[] = "audio";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload(
ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
payload_name, payload_type, 48000, 0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_audio_.SendAudio(
ASSERT_TRUE(rtp_sender_audio_->SendAudio(
AudioFrameType::kAudioFrameCN, payload_type, 4321, payload,
sizeof(payload), kAbsoluteCaptureTimestampMs));
@ -152,11 +155,11 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAbsoluteCaptureTime) {
constexpr uint32_t kAbsoluteCaptureTimestampMs = 521;
const char payload_name[] = "audio";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload(
ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
payload_name, payload_type, 48000, 0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_audio_.SendAudio(
ASSERT_TRUE(rtp_sender_audio_->SendAudio(
AudioFrameType::kAudioFrameCN, payload_type, 4321, payload,
sizeof(payload), kAbsoluteCaptureTimestampMs));
@ -166,6 +169,43 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAbsoluteCaptureTime) {
EXPECT_TRUE(absolute_capture_time);
EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp,
Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs()));
EXPECT_FALSE(
absolute_capture_time->estimated_capture_clock_offset.has_value());
}
// Essentially the same test as SendAudioWithAbsoluteCaptureTime but with a
// field trial. After the field trial is experimented, we will remove
// SendAudioWithAbsoluteCaptureTime.
TEST_F(RtpSenderAudioTest,
SendAudioWithAbsoluteCaptureTimeWithCaptureClockOffset) {
// Recreate rtp_sender_audio_ with new field trial.
test::ScopedFieldTrials field_trial(
"WebRTC-IncludeCaptureClockOffset/Enabled/");
rtp_sender_audio_ =
std::make_unique<RTPSenderAudio>(&fake_clock_, rtp_module_->RtpSender());
rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::kUri,
kAbsoluteCaptureTimeExtensionId);
constexpr uint32_t kAbsoluteCaptureTimestampMs = 521;
const char payload_name[] = "audio";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
payload_name, payload_type, 48000, 0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_TRUE(rtp_sender_audio_->SendAudio(
AudioFrameType::kAudioFrameCN, payload_type, 4321, payload,
sizeof(payload), kAbsoluteCaptureTimestampMs));
auto absolute_capture_time =
transport_.last_sent_packet()
.GetExtension<AbsoluteCaptureTimeExtension>();
EXPECT_TRUE(absolute_capture_time);
EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp,
Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs()));
EXPECT_TRUE(
absolute_capture_time->estimated_capture_clock_offset.has_value());
EXPECT_EQ(0, *absolute_capture_time->estimated_capture_clock_offset);
}
// As RFC4733, named telephone events are carried as part of the audio stream
@ -178,40 +218,40 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
const char* kDtmfPayloadName = "telephone-event";
const uint32_t kPayloadFrequency = 8000;
const uint8_t kPayloadType = 126;
ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload(
ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
kDtmfPayloadName, kPayloadType, kPayloadFrequency, 0, 0));
// For Telephone events, payload is not added to the registered payload list,
// it will register only the payload used for audio stream.
// Registering the payload again for audio stream with different payload name.
const char* kPayloadName = "payload_name";
ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload(
ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload(
kPayloadName, kPayloadType, kPayloadFrequency, 1, 0));
// Start time is arbitrary.
uint32_t capture_timestamp = fake_clock_.TimeInMilliseconds();
// DTMF event key=9, duration=500 and attenuationdB=10
rtp_sender_audio_.SendTelephoneEvent(9, 500, 10);
rtp_sender_audio_->SendTelephoneEvent(9, 500, 10);
// During start, it takes the starting timestamp as last sent timestamp.
// The duration is calculated as the difference of current and last sent
// timestamp. So for first call it will skip since the duration is zero.
ASSERT_TRUE(rtp_sender_audio_.SendAudio(
ASSERT_TRUE(rtp_sender_audio_->SendAudio(
AudioFrameType::kEmptyFrame, kPayloadType, capture_timestamp, nullptr, 0,
/*absolute_capture_time_ms=0*/ 0));
// DTMF Sample Length is (Frequency/1000) * Duration.
// So in this case, it is (8000/1000) * 500 = 4000.
// Sending it as two packets.
ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kEmptyFrame,
kPayloadType,
capture_timestamp + 2000, nullptr, 0,
/*absolute_capture_time_ms=0*/ 0));
ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame,
kPayloadType,
capture_timestamp + 2000, nullptr, 0,
/*absolute_capture_time_ms=0*/ 0));
// Marker Bit should be set to 1 for first packet.
EXPECT_TRUE(transport_.last_sent_packet().Marker());
ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kEmptyFrame,
kPayloadType,
capture_timestamp + 4000, nullptr, 0,
/*absolute_capture_time_ms=0*/ 0));
ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame,
kPayloadType,
capture_timestamp + 4000, nullptr, 0,
/*absolute_capture_time_ms=0*/ 0));
// Marker Bit should be set to 0 for rest of the packets.
EXPECT_FALSE(transport_.last_sent_packet().Marker());
}

View File

@ -44,6 +44,8 @@ namespace webrtc {
namespace {
constexpr size_t kRedForFecHeaderLength = 1;
constexpr int64_t kMaxUnretransmittableFrameIntervalMs = 33 * 4;
constexpr char kIncludeCaptureClockOffset[] =
"WebRTC-IncludeCaptureClockOffset";
void BuildRedPayload(const RtpPacketToSend& media_packet,
RtpPacketToSend* red_packet) {
@ -146,7 +148,10 @@ RTPSenderVideo::RTPSenderVideo(const Config& config)
config.frame_transformer,
rtp_sender_->SSRC(),
config.send_transport_queue)
: nullptr) {
: nullptr),
include_capture_clock_offset_(absl::StartsWith(
config.field_trials->Lookup(kIncludeCaptureClockOffset),
"Enabled")) {
if (frame_transformer_delegate_)
frame_transformer_delegate_->Init();
}
@ -446,7 +451,9 @@ bool RTPSenderVideo::SendVideo(
single_packet->Csrcs()),
single_packet->Timestamp(), kVideoPayloadTypeFrequency,
Int64MsToUQ32x32(single_packet->capture_time_ms() + NtpOffsetMs()),
/*estimated_capture_clock_offset=*/absl::nullopt);
/*estimated_capture_clock_offset=*/
include_capture_clock_offset_ ? absl::make_optional(0)
: absl::nullopt);
auto first_packet = std::make_unique<RtpPacketToSend>(*single_packet);
auto middle_packet = std::make_unique<RtpPacketToSend>(*single_packet);

View File

@ -218,6 +218,8 @@ class RTPSenderVideo {
const rtc::scoped_refptr<RTPSenderVideoFrameTransformerDelegate>
frame_transformer_delegate_;
const bool include_capture_clock_offset_;
};
} // namespace webrtc

View File

@ -147,17 +147,25 @@ class TestRtpSenderVideo : public RTPSenderVideo {
class FieldTrials : public WebRtcKeyValueConfig {
public:
explicit FieldTrials(bool use_send_side_bwe_with_overhead)
: use_send_side_bwe_with_overhead_(use_send_side_bwe_with_overhead) {}
: use_send_side_bwe_with_overhead_(use_send_side_bwe_with_overhead),
include_capture_clock_offset_(false) {}
void set_include_capture_clock_offset(bool include_capture_clock_offset) {
include_capture_clock_offset_ = include_capture_clock_offset;
}
std::string Lookup(absl::string_view key) const override {
return key == "WebRTC-SendSideBwe-WithOverhead" &&
use_send_side_bwe_with_overhead_
? "Enabled"
: "";
if (key == "WebRTC-SendSideBwe-WithOverhead") {
return use_send_side_bwe_with_overhead_ ? "Enabled" : "";
} else if (key == "WebRTC-IncludeCaptureClockOffset") {
return include_capture_clock_offset_ ? "Enabled" : "";
}
return "";
}
private:
bool use_send_side_bwe_with_overhead_;
bool include_capture_clock_offset_;
};
class RtpSenderVideoTest : public ::testing::TestWithParam<bool> {
@ -175,10 +183,11 @@ class RtpSenderVideoTest : public ::testing::TestWithParam<bool> {
config.local_media_ssrc = kSsrc;
return config;
}())),
rtp_sender_video_(&fake_clock_,
rtp_module_->RtpSender(),
nullptr,
field_trials_) {
rtp_sender_video_(
std::make_unique<TestRtpSenderVideo>(&fake_clock_,
rtp_module_->RtpSender(),
nullptr,
field_trials_)) {
rtp_module_->SetSequenceNumber(kSeqNum);
rtp_module_->SetStartTimestamp(0);
}
@ -193,7 +202,7 @@ class RtpSenderVideoTest : public ::testing::TestWithParam<bool> {
LoopbackTransportTest transport_;
RateLimiter retransmission_rate_limiter_;
std::unique_ptr<ModuleRtpRtcpImpl2> rtp_module_;
TestRtpSenderVideo rtp_sender_video_;
std::unique_ptr<TestRtpSenderVideo> rtp_sender_video_;
};
TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) {
@ -204,8 +213,8 @@ TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) {
RTPVideoHeader hdr;
hdr.rotation = kVideoRotation_0;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
VideoRotation rotation;
EXPECT_TRUE(
@ -230,9 +239,9 @@ TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) {
fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp,
kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp,
kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
VideoSendTiming timing;
EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
&timing));
@ -250,14 +259,14 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
hdr.rotation = kVideoRotation_90;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs));
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs));
hdr.rotation = kVideoRotation_0;
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
hdr, kDefaultExpectedRetransmissionTimeMs));
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
hdr, kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
EXPECT_TRUE(
@ -274,13 +283,13 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
hdr.rotation = kVideoRotation_90;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs));
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs));
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
hdr, kDefaultExpectedRetransmissionTimeMs));
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
hdr, kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
EXPECT_TRUE(
@ -313,13 +322,13 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesGeneric) {
RTPVideoHeader header;
header.codec = kVideoCodecGeneric;
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
}
@ -330,13 +339,13 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
H264PacketizationMode::NonInterleaved;
header.codec = kVideoCodecH264;
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
}
@ -347,19 +356,19 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) {
auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.temporalIdx = 0;
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers | kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers,
kDefaultExpectedRetransmissionTimeMs));
}
@ -372,13 +381,13 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8HigherLayers) {
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
vp8_header.temporalIdx = tid;
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers | kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
}
@ -392,13 +401,13 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP9) {
for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
vp9_header.temporal_idx = tid;
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
EXPECT_FALSE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
EXPECT_TRUE(rtp_sender_video_->AllowRetransmission(
header, kRetransmitHigherLayers | kRetransmitBaseLayer,
kDefaultExpectedRetransmissionTimeMs));
}
@ -422,7 +431,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs);
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
}
@ -432,31 +441,34 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
// will not be retransmitted.
vp8_header.temporalIdx = 1;
EXPECT_FALSE(
rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs));
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
EXPECT_FALSE(
rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs));
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// The TL0 frame did not arrive. So allow retransmission.
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs));
EXPECT_TRUE(
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// Insert a frame for TL2. We just had frame in TL1, so the next one there is
// in three frames away. TL0 is still too far in the past. So, allow
// retransmission.
vp8_header.temporalIdx = 2;
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs));
EXPECT_TRUE(
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// Another TL2, next in TL1 is two frames away. Allow again.
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs));
EXPECT_TRUE(
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
// Yet another TL2, next in TL1 is now only one frame away, so don't store
// for retransmission.
EXPECT_FALSE(
rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs));
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
}
TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
@ -478,7 +490,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];
rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs);
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs);
fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
}
@ -488,7 +500,8 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
// layer, but that last frame in TL1 was a long time ago in absolute terms,
// so allow retransmission anyway.
vp8_header.temporalIdx = 1;
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs));
EXPECT_TRUE(
rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs));
}
TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
@ -503,7 +516,7 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
FrameDependencyTemplate().S(1).T(0).Dtis("-S"),
FrameDependencyTemplate().S(1).T(1).Dtis("-D"),
};
rtp_sender_video_.SetVideoStructure(&video_structure);
rtp_sender_video_->SetVideoStructure(&video_structure);
// Send key frame.
RTPVideoHeader hdr;
@ -514,8 +527,8 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
DecodeTargetIndication::kSwitch};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key;
@ -540,8 +553,8 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
generic.decode_target_indications = {DecodeTargetIndication::kNotPresent,
DecodeTargetIndication::kRequired};
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
EXPECT_EQ(transport_.packets_sent(), 2);
DependencyDescriptor descriptor_delta;
@ -571,7 +584,7 @@ TEST_P(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) {
video_structure.templates = {
FrameDependencyTemplate().S(0).T(0).Dtis("SS").ChainDiffs({1}),
};
rtp_sender_video_.SetVideoStructure(&video_structure);
rtp_sender_video_->SetVideoStructure(&video_structure);
RTPVideoHeader hdr;
RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
@ -580,8 +593,8 @@ TEST_P(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) {
DecodeTargetIndication::kSwitch};
generic.chain_diffs = {2};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key;
@ -605,7 +618,7 @@ TEST_P(RtpSenderVideoTest,
video_structure.templates = {
FrameDependencyTemplate().S(0).T(0).Dtis("SS").ChainDiffs({1}),
};
rtp_sender_video_.SetVideoStructure(&video_structure);
rtp_sender_video_->SetVideoStructure(&video_structure);
RTPVideoHeader hdr;
RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
@ -615,8 +628,8 @@ TEST_P(RtpSenderVideoTest,
generic.active_decode_targets = 0b01;
generic.chain_diffs = {1};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key;
@ -652,9 +665,9 @@ TEST_P(RtpSenderVideoTest,
generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
DecodeTargetIndication::kSwitch};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SetVideoStructure(&video_structure1);
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SetVideoStructure(&video_structure1);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
// Parse 1st extension.
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key1;
@ -669,8 +682,8 @@ TEST_P(RtpSenderVideoTest,
generic.decode_target_indications = {DecodeTargetIndication::kDiscardable,
DecodeTargetIndication::kNotPresent};
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 2);
RtpPacket delta_packet = transport_.last_sent_packet();
@ -680,9 +693,9 @@ TEST_P(RtpSenderVideoTest,
generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
DecodeTargetIndication::kSwitch};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SetVideoStructure(&video_structure2);
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SetVideoStructure(&video_structure2);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
// Parse the 2nd key frame.
ASSERT_EQ(transport_.packets_sent(), 3);
DependencyDescriptor descriptor_key2;
@ -758,8 +771,8 @@ TEST_P(RtpSenderVideoTest, PopulateGenericFrameDescriptor) {
generic.dependencies.push_back(kFrameId - 1);
generic.dependencies.push_back(kFrameId - 500);
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
RtpGenericFrameDescriptor descriptor_wire;
EXPECT_EQ(1, transport_.packets_sent());
@ -792,9 +805,9 @@ void RtpSenderVideoTest::
RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
generic.frame_id = kFrameId;
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
rtp_sender_video_.SendVideo(kPayload, VideoCodecType::kVideoCodecVP8,
kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, VideoCodecType::kVideoCodecVP8,
kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
// Expect only minimal 1-byte vp8 descriptor was generated.
@ -819,12 +832,13 @@ TEST_P(RtpSenderVideoTest, AbsoluteCaptureTime) {
RTPVideoHeader hdr;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp,
kAbsoluteCaptureTimestampMs, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
kAbsoluteCaptureTimestampMs, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
// It is expected that one and only one of the packets sent on this video
// frame has absolute capture time header extension.
// frame has absolute capture time header extension. And no absolute capture
// time header extensions include capture clock offset.
int packets_with_abs_capture_time = 0;
for (const RtpPacketReceived& packet : transport_.sent_packets()) {
auto absolute_capture_time =
@ -833,6 +847,45 @@ TEST_P(RtpSenderVideoTest, AbsoluteCaptureTime) {
++packets_with_abs_capture_time;
EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp,
Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs()));
EXPECT_FALSE(
absolute_capture_time->estimated_capture_clock_offset.has_value());
}
}
EXPECT_EQ(packets_with_abs_capture_time, 1);
}
// Essentially the same test as AbsoluteCaptureTime but with a field trial.
// After the field trial is experimented, we will remove AbsoluteCaptureTime.
TEST_P(RtpSenderVideoTest, AbsoluteCaptureTimeWithCaptureClockOffset) {
field_trials_.set_include_capture_clock_offset(true);
rtp_sender_video_ = std::make_unique<TestRtpSenderVideo>(
&fake_clock_, rtp_module_->RtpSender(), nullptr, field_trials_);
constexpr int64_t kAbsoluteCaptureTimestampMs = 12345678;
uint8_t kFrame[kMaxPacketLength];
rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::kUri,
kAbsoluteCaptureTimeExtensionId);
RTPVideoHeader hdr;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp,
kAbsoluteCaptureTimestampMs, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
// It is expected that one and only one of the packets sent on this video
// frame has absolute capture time header extension. And it includes capture
// clock offset.
int packets_with_abs_capture_time = 0;
for (const RtpPacketReceived& packet : transport_.sent_packets()) {
auto absolute_capture_time =
packet.GetExtension<AbsoluteCaptureTimeExtension>();
if (absolute_capture_time) {
++packets_with_abs_capture_time;
EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp,
Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs()));
EXPECT_TRUE(
absolute_capture_time->estimated_capture_clock_offset.has_value());
EXPECT_EQ(0, *absolute_capture_time->estimated_capture_clock_offset);
}
}
EXPECT_EQ(packets_with_abs_capture_time, 1);
@ -853,8 +906,8 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) {
auto& vp8_header = hdr.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.temporalIdx = 0;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
EXPECT_FALSE(
transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
@ -862,8 +915,8 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) {
hdr.playout_delay = kExpectedDelay;
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
vp8_header.temporalIdx = 1;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
PlayoutDelay received_delay = PlayoutDelay::Noop();
ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
&received_delay));
@ -873,23 +926,23 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) {
// be populated since delivery wasn't guaranteed on the last one.
hdr.playout_delay = PlayoutDelay::Noop();  // Indicates "no change".
vp8_header.temporalIdx = 0;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
&received_delay));
EXPECT_EQ(received_delay, kExpectedDelay);
// The next frame does not need the extensions since its delivery has
// already been guaranteed.
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
EXPECT_FALSE(
transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
// Insert key-frame, we need to refresh the state here.
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
&received_delay));
EXPECT_EQ(received_delay, kExpectedDelay);