From 4c556219e59f2ff3ea62da44e1ff81c6cd1f8021 Mon Sep 17 00:00:00 2001 From: Danil Chapovalov Date: Mon, 4 Sep 2023 12:27:59 +0200 Subject: [PATCH] Cleanup RTPSenderAudio::SendAudio MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Combine all parameters into single struct so that it is easier to add and remove optional parameters Use Timestamp type instead of plain int to represent capture time Use rtc::ArrayView instead of pointer+size to represent payload Merge passing audio level into send function. Bug: webrtc:13757, webrtc:14870 Change-Id: I0386b710eb99b864334d61235add9abcde9bc69d Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/317442 Reviewed-by: Jakob Ivarsson‎ Commit-Queue: Danil Chapovalov Cr-Commit-Position: refs/heads/main@{#40688} --- audio/channel_send.cc | 24 +++---- audio/voip/audio_egress.cc | 6 +- modules/rtp_rtcp/source/rtp_sender_audio.cc | 66 ++++++++++++------- modules/rtp_rtcp/source/rtp_sender_audio.h | 44 +++++++++---- .../source/rtp_sender_audio_unittest.cc | 63 +++++++++--------- 5 files changed, 122 insertions(+), 81 deletions(-) diff --git a/audio/channel_send.cc b/audio/channel_send.cc index 984f530d38..e3058fca0d 100644 --- a/audio/channel_send.cc +++ b/audio/channel_send.cc @@ -294,13 +294,6 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, uint32_t rtp_timestamp_without_offset, rtc::ArrayView payload, int64_t absolute_capture_timestamp_ms) { - if (include_audio_level_indication_.load()) { - // Store current audio level in the RTP sender. - // The level will be used in combination with voice-activity state - // (frameType) to add an RTP header extension - rtp_sender_audio_->SetAudioLevel(rms_level_.Average()); - } - // E2EE Custom Audio Frame Encryption (This is optional). // Keep this buffer around for the lifetime of the send call. 
rtc::Buffer encrypted_audio_payload; @@ -357,10 +350,19 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, // knowledge of the offset to a single place. // This call will trigger Transport::SendPacket() from the RTP/RTCP module. - if (!rtp_sender_audio_->SendAudio( - frameType, payloadType, - rtp_timestamp_without_offset + rtp_rtcp_->StartTimestamp(), - payload.data(), payload.size(), absolute_capture_timestamp_ms)) { + RTPSenderAudio::RtpAudioFrame frame = { + .type = frameType, + .payload = payload, + .payload_id = payloadType, + .rtp_timestamp = + rtp_timestamp_without_offset + rtp_rtcp_->StartTimestamp()}; + if (absolute_capture_timestamp_ms > 0) { + frame.capture_time = Timestamp::Millis(absolute_capture_timestamp_ms); + } + if (include_audio_level_indication_.load()) { + frame.audio_level_dbov = rms_level_.Average(); + } + if (!rtp_sender_audio_->SendAudio(frame)) { RTC_DLOG(LS_ERROR) << "ChannelSend::SendData() failed to send data to RTP/RTCP module"; return -1; diff --git a/audio/voip/audio_egress.cc b/audio/voip/audio_egress.cc index ae98d91cb8..95a1a3351e 100644 --- a/audio/voip/audio_egress.cc +++ b/audio/voip/audio_egress.cc @@ -132,8 +132,10 @@ int32_t AudioEgress::SendData(AudioFrameType frame_type, const uint32_t rtp_timestamp = timestamp + rtp_rtcp_->StartTimestamp(); // This call will trigger Transport::SendPacket() from the RTP/RTCP module. 
- if (!rtp_sender_audio_.SendAudio(frame_type, payload_type, rtp_timestamp, - payload.data(), payload.size())) { + if (!rtp_sender_audio_.SendAudio({.type = frame_type, + .payload = payload, + .payload_id = payload_type, + .rtp_timestamp = rtp_timestamp})) { RTC_DLOG(LS_ERROR) << "AudioEgress::SendData() failed to send data to RTP/RTCP module"; return -1; diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.cc b/modules/rtp_rtcp/source/rtp_sender_audio.cc index 0fac4407c6..62174e343c 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.cc +++ b/modules/rtp_rtcp/source/rtp_sender_audio.cc @@ -146,10 +146,10 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, uint32_t rtp_timestamp, const uint8_t* payload_data, size_t payload_size) { - return SendAudio(frame_type, payload_type, rtp_timestamp, payload_data, - payload_size, - // TODO(bugs.webrtc.org/10739) replace once plumbed. - /*absolute_capture_timestamp_ms=*/-1); + return SendAudio({.type = frame_type, + .payload{payload_data, payload_size}, + .payload_id = payload_type, + .rtp_timestamp = rtp_timestamp}); } bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, @@ -158,8 +158,23 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, const uint8_t* payload_data, size_t payload_size, int64_t absolute_capture_timestamp_ms) { - TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", rtp_timestamp, "Send", "type", - FrameTypeToString(frame_type)); + RtpAudioFrame frame = { + .type = frame_type, + .payload{payload_data, payload_size}, + .payload_id = payload_type, + .rtp_timestamp = rtp_timestamp, + }; + if (absolute_capture_timestamp_ms > 0) { + frame.capture_time = Timestamp::Millis(absolute_capture_timestamp_ms); + } + return SendAudio(frame); +} + +bool RTPSenderAudio::SendAudio(const RtpAudioFrame& frame) { + RTC_DCHECK_GE(frame.payload_id, 0); + RTC_DCHECK_LE(frame.payload_id, 127); + TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", frame.rtp_timestamp, "Send", + "type", FrameTypeToString(frame.type)); // From 
RFC 4733: // A source has wide latitude as to how often it sends event updates. A @@ -182,7 +197,7 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, if ((clock_->TimeInMilliseconds() - dtmf_time_last_sent_) > kDtmfIntervalTimeMs) { // New tone to play - dtmf_timestamp_ = rtp_timestamp; + dtmf_timestamp_ = frame.rtp_timestamp; if (dtmf_queue_.NextDtmf(&dtmf_current_event_)) { dtmf_event_first_packet_sent_ = false; dtmf_length_samples_ = @@ -195,20 +210,20 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, // A source MAY send events and coded audio packets for the same time // but we don't support it if (dtmf_event_is_on_) { - if (frame_type == AudioFrameType::kEmptyFrame) { + if (frame.type == AudioFrameType::kEmptyFrame) { // kEmptyFrame is used to drive the DTMF when in CN mode // it can be triggered more frequently than we want to send the // DTMF packets. const unsigned int dtmf_interval_time_rtp = dtmf_payload_freq * kDtmfIntervalTimeMs / 1000; - if ((rtp_timestamp - dtmf_timestamp_last_sent_) < + if ((frame.rtp_timestamp - dtmf_timestamp_last_sent_) < dtmf_interval_time_rtp) { // not time to send yet return true; } } - dtmf_timestamp_last_sent_ = rtp_timestamp; - uint32_t dtmf_duration_samples = rtp_timestamp - dtmf_timestamp_; + dtmf_timestamp_last_sent_ = frame.rtp_timestamp; + uint32_t dtmf_duration_samples = frame.rtp_timestamp - dtmf_timestamp_; bool ended = false; bool send = true; @@ -229,7 +244,7 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, static_cast(0xffff), false); // set new timestap for this segment - dtmf_timestamp_ = rtp_timestamp; + dtmf_timestamp_ = frame.rtp_timestamp; dtmf_duration_samples -= 0xffff; dtmf_length_samples_ -= 0xffff; @@ -248,8 +263,8 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, } return true; } - if (payload_size == 0 || payload_data == NULL) { - if (frame_type == AudioFrameType::kEmptyFrame) { + if (frame.payload.empty()) { + if (frame.type == AudioFrameType::kEmptyFrame) 
{ // we don't send empty audio RTP packets // no error since we use it to either drive DTMF when we use VAD, or // enter DTX. @@ -259,15 +274,16 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, } std::unique_ptr packet = rtp_sender_->AllocatePacket(); - packet->SetMarker(MarkerBit(frame_type, payload_type)); - packet->SetPayloadType(payload_type); - packet->SetTimestamp(rtp_timestamp); + packet->SetMarker(MarkerBit(frame.type, frame.payload_id)); + packet->SetPayloadType(frame.payload_id); + packet->SetTimestamp(frame.rtp_timestamp); packet->set_capture_time(clock_->CurrentTime()); // Update audio level extension, if included. packet->SetExtension( - frame_type == AudioFrameType::kAudioFrameSpeech, audio_level_dbov); + frame.type == AudioFrameType::kAudioFrameSpeech, + frame.audio_level_dbov.value_or(audio_level_dbov)); - if (absolute_capture_timestamp_ms > 0) { + if (frame.capture_time.has_value()) { // Send absolute capture time periodically in order to optimize and save // network traffic. Missing absolute capture times can be interpolated on // the receiving end if sending intervals are small enough. @@ -277,8 +293,8 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, // Replace missing value with 0 (invalid frequency), this will trigger // absolute capture time sending. encoder_rtp_timestamp_frequency.value_or(0), - Int64MsToUQ32x32(clock_->ConvertTimestampToNtpTimeInMilliseconds( - absolute_capture_timestamp_ms)), + static_cast( + clock_->ConvertTimestampToNtpTime(*frame.capture_time)), /*estimated_capture_clock_offset=*/0); if (absolute_capture_time) { // It also checks that extension was registered during SDP negotiation. 
If @@ -288,15 +304,15 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, } } - uint8_t* payload = packet->AllocatePayload(payload_size); + uint8_t* payload = packet->AllocatePayload(frame.payload.size()); RTC_CHECK(payload); - memcpy(payload, payload_data, payload_size); + memcpy(payload, frame.payload.data(), frame.payload.size()); { MutexLock lock(&send_audio_mutex_); - last_payload_type_ = payload_type; + last_payload_type_ = frame.payload_id; } - TRACE_EVENT_ASYNC_END2("webrtc", "Audio", rtp_timestamp, "timestamp", + TRACE_EVENT_ASYNC_END2("webrtc", "Audio", frame.rtp_timestamp, "timestamp", packet->Timestamp(), "seqnum", packet->SequenceNumber()); packet->set_packet_type(RtpPacketMediaType::kAudio); diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.h b/modules/rtp_rtcp/source/rtp_sender_audio.h index e22ca41b90..ee4e92635f 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.h +++ b/modules/rtp_rtcp/source/rtp_sender_audio.h @@ -44,25 +44,45 @@ class RTPSenderAudio { size_t channels, uint32_t rate); - bool SendAudio(AudioFrameType frame_type, - int8_t payload_type, - uint32_t rtp_timestamp, - const uint8_t* payload_data, - size_t payload_size); + struct RtpAudioFrame { + AudioFrameType type = AudioFrameType::kAudioFrameSpeech; + rtc::ArrayView payload; + + // Payload id to write to the payload type field of the rtp packet. + int payload_id = -1; + + // capture time of the audio frame represented as rtp timestamp. + uint32_t rtp_timestamp = 0; + + // capture time of the audio frame in the same epoch as `clock->CurrentTime` + absl::optional capture_time; + + // Audio level in dBov for + // header-extension-for-audio-level-indication. + // Valid range is [0,127]. Actual value is negative. 
+ absl::optional audio_level_dbov; + }; + bool SendAudio(const RtpAudioFrame& frame); + + [[deprecated]] bool SendAudio(AudioFrameType frame_type, + int8_t payload_type, + uint32_t rtp_timestamp, + const uint8_t* payload_data, + size_t payload_size); // `absolute_capture_timestamp_ms` and `Clock::CurrentTime` // should be using the same epoch. - bool SendAudio(AudioFrameType frame_type, - int8_t payload_type, - uint32_t rtp_timestamp, - const uint8_t* payload_data, - size_t payload_size, - int64_t absolute_capture_timestamp_ms); + [[deprecated]] bool SendAudio(AudioFrameType frame_type, + int8_t payload_type, + uint32_t rtp_timestamp, + const uint8_t* payload_data, + size_t payload_size, + int64_t absolute_capture_timestamp_ms); // Store the audio level in dBov for // header-extension-for-audio-level-indication. // Valid range is [0,127]. Actual value is negative. - int32_t SetAudioLevel(uint8_t level_dbov); + [[deprecated]] int32_t SetAudioLevel(uint8_t level_dbov); // Send a DTMF tone using RFC 2833 (4733) int32_t SendTelephoneEvent(uint8_t key, uint16_t time_ms, uint8_t level); diff --git a/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc index 0087712518..0db610c149 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc @@ -32,7 +32,6 @@ enum : int { // The first valid value is 1. 
const uint16_t kSeqNum = 33; const uint32_t kSsrc = 725242; -const uint8_t kAudioLevel = 0x5a; const uint64_t kStartTime = 123456789; using ::testing::ElementsAreArray; @@ -94,17 +93,15 @@ TEST_F(RtpSenderAudioTest, SendAudio) { payload_name, payload_type, 48000, 0, 1500)); uint8_t payload[] = {47, 11, 32, 93, 89}; - ASSERT_TRUE( - rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type, - 4321, payload, sizeof(payload), - /*absolute_capture_timestamp_ms=*/0)); + ASSERT_TRUE(rtp_sender_audio_->SendAudio( + {.payload = payload, .payload_id = payload_type})); auto sent_payload = transport_.last_sent_packet().payload(); EXPECT_THAT(sent_payload, ElementsAreArray(payload)); } TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) { - EXPECT_EQ(0, rtp_sender_audio_->SetAudioLevel(kAudioLevel)); + const uint8_t kAudioLevel = 0x5a; rtp_module_->RegisterRtpHeaderExtension(AudioLevel::Uri(), kAudioLevelExtensionId); @@ -116,9 +113,10 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) { uint8_t payload[] = {47, 11, 32, 93, 89}; ASSERT_TRUE( - rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type, - 4321, payload, sizeof(payload), - /*absolute_capture_timestamp_ms=*/0)); + rtp_sender_audio_->SendAudio({.type = AudioFrameType::kAudioFrameCN, + .payload = payload, + .payload_id = payload_type, + .audio_level_dbov = kAudioLevel})); auto sent_payload = transport_.last_sent_packet().payload(); EXPECT_THAT(sent_payload, ElementsAreArray(payload)); @@ -132,7 +130,7 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) { } TEST_F(RtpSenderAudioTest, SendAudioWithoutAbsoluteCaptureTime) { - constexpr uint32_t kAbsoluteCaptureTimestampMs = 521; + constexpr Timestamp kAbsoluteCaptureTimestamp = Timestamp::Millis(521); const char payload_name[] = "audio"; const uint8_t payload_type = 127; ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( @@ -140,9 +138,11 @@ TEST_F(RtpSenderAudioTest, 
SendAudioWithoutAbsoluteCaptureTime) { uint8_t payload[] = {47, 11, 32, 93, 89}; ASSERT_TRUE(rtp_sender_audio_->SendAudio( - AudioFrameType::kAudioFrameCN, payload_type, 4321, payload, - sizeof(payload), kAbsoluteCaptureTimestampMs)); + {.payload = payload, + .payload_id = payload_type, + .capture_time = kAbsoluteCaptureTimestamp})); + // AbsoluteCaptureTimeExtension wasn't registered, thus can't be sent. EXPECT_FALSE(transport_.last_sent_packet() .HasExtension()); } @@ -151,7 +151,7 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAbsoluteCaptureTimeWithCaptureClockOffset) { rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::Uri(), kAbsoluteCaptureTimeExtensionId); - constexpr uint32_t kAbsoluteCaptureTimestampMs = 521; + constexpr Timestamp kAbsoluteCaptureTimestamp = Timestamp::Millis(521); const char payload_name[] = "audio"; const uint8_t payload_type = 127; ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( @@ -159,17 +159,16 @@ TEST_F(RtpSenderAudioTest, uint8_t payload[] = {47, 11, 32, 93, 89}; ASSERT_TRUE(rtp_sender_audio_->SendAudio( - AudioFrameType::kAudioFrameCN, payload_type, 4321, payload, - sizeof(payload), kAbsoluteCaptureTimestampMs)); + {.payload = payload, + .payload_id = payload_type, + .capture_time = kAbsoluteCaptureTimestamp})); auto absolute_capture_time = transport_.last_sent_packet() .GetExtension(); ASSERT_TRUE(absolute_capture_time); - EXPECT_EQ( - absolute_capture_time->absolute_capture_timestamp, - Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( - kAbsoluteCaptureTimestampMs))); + EXPECT_EQ(NtpTime(absolute_capture_time->absolute_capture_timestamp), + fake_clock_.ConvertTimestampToNtpTime(kAbsoluteCaptureTimestamp)); EXPECT_EQ(absolute_capture_time->estimated_capture_clock_offset, 0); } @@ -192,31 +191,33 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) { ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( kPayloadName, kPayloadType, kPayloadFrequency, 1, 0)); // Start time is 
arbitrary. - uint32_t capture_timestamp = fake_clock_.TimeInMilliseconds(); + uint32_t capture_timestamp = 12345; // DTMF event key=9, duration=500 and attenuationdB=10 rtp_sender_audio_->SendTelephoneEvent(9, 500, 10); // During start, it takes the starting timestamp as last sent timestamp. // The duration is calculated as the difference of current and last sent // timestamp. So for first call it will skip since the duration is zero. - ASSERT_TRUE(rtp_sender_audio_->SendAudio( - AudioFrameType::kEmptyFrame, kPayloadType, capture_timestamp, nullptr, 0, - /*absolute_capture_time_ms=0*/ 0)); + ASSERT_TRUE( + rtp_sender_audio_->SendAudio({.type = AudioFrameType::kEmptyFrame, + .payload_id = kPayloadType, + .rtp_timestamp = capture_timestamp})); // DTMF Sample Length is (Frequency/1000) * Duration. // So in this case, it is (8000/1000) * 500 = 4000. // Sending it as two packets. - ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame, - kPayloadType, - capture_timestamp + 2000, nullptr, 0, - /*absolute_capture_time_ms=0*/ 0)); + ASSERT_TRUE(rtp_sender_audio_->SendAudio( + {.type = AudioFrameType::kEmptyFrame, + .payload_id = kPayloadType, + .rtp_timestamp = capture_timestamp + 2000})); // Marker Bit should be set to 1 for first packet. EXPECT_TRUE(transport_.last_sent_packet().Marker()); - ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame, - kPayloadType, - capture_timestamp + 4000, nullptr, 0, - /*absolute_capture_time_ms=0*/ 0)); + ASSERT_TRUE(rtp_sender_audio_->SendAudio( + {.type = AudioFrameType::kEmptyFrame, + .payload_id = kPayloadType, + .rtp_timestamp = capture_timestamp + 4000})); + // Marker Bit should be set to 0 for rest of the packets. EXPECT_FALSE(transport_.last_sent_packet().Marker()); }