Deprecate RTPFragmentationHeader argument to AudioPacketizationCallback::SendData

The argument appears to be unused everywhere. It will be deleted in a follow-up CL.

Bug: webrtc:6471
Change-Id: Ief992db6e52aee3cf1bc77ffd659ffbc072672ba
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/134212
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27787}
Niels Möller authored 2019-04-25 16:31:18 +02:00, committed by Commit Bot
commit c35b6e675a (parent e670fd9795)
17 changed files with 54 additions and 121 deletions

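For review context, a minimal standalone sketch of the transition pattern applied to AudioPacketizationCallback below: each SendData overload gets a default implementation that forwards to the other, so downstream classes that still override the old signature and callers that already use the new one both keep compiling. The names here (SendCallback, FragmentationInfo, LegacyImpl, MigratedImpl, Deliver) are hypothetical stand-ins, not WebRTC types, and this file is not part of the CL.

// sketch_send_callback.cc -- hypothetical illustration of the deprecation pattern.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct FragmentationInfo;  // Stand-in for RTPFragmentationHeader; only used as a pointer.

class SendCallback {
 public:
  virtual ~SendCallback() = default;

  // New, preferred overload. Its default forwards to the deprecated one, so a
  // subclass that still overrides only the old signature keeps working.
  virtual int32_t SendData(const uint8_t* payload_data, size_t payload_len_bytes) {
    return SendData(payload_data, payload_len_bytes, nullptr);
  }

  // Deprecated overload. Its default forwards to the new one, so a subclass
  // that has already migrated also keeps working. Once every subclass has
  // migrated, the new overload can be made pure virtual and this one deleted.
  virtual int32_t SendData(const uint8_t* payload_data,
                           size_t payload_len_bytes,
                           const FragmentationInfo* /*fragmentation*/) {
    return SendData(payload_data, payload_len_bytes);
  }
};

// Downstream implementation that has not migrated yet.
class LegacyImpl : public SendCallback {
 public:
  int32_t SendData(const uint8_t* /*payload_data*/,
                   size_t payload_len_bytes,
                   const FragmentationInfo* /*fragmentation*/) override {
    std::printf("LegacyImpl received %zu bytes\n", payload_len_bytes);
    return 0;
  }
};

// Downstream implementation that has migrated to the new signature.
class MigratedImpl : public SendCallback {
 public:
  int32_t SendData(const uint8_t* /*payload_data*/, size_t payload_len_bytes) override {
    std::printf("MigratedImpl received %zu bytes\n", payload_len_bytes);
    return 0;
  }
};

// The caller (the ACM, in the real code) always uses the new overload.
int32_t Deliver(SendCallback* cb, const uint8_t* data, size_t len) {
  return cb->SendData(data, len);
}

int main() {
  const uint8_t buf[3] = {0x01, 0x02, 0x03};
  LegacyImpl legacy;
  MigratedImpl migrated;
  Deliver(&legacy, buf, sizeof(buf));    // New overload's default -> legacy override.
  Deliver(&migrated, buf, sizeof(buf));  // Dispatches directly to the new override.
  // Caveat: a subclass that overrides neither overload would make the two
  // defaults call each other forever, which is why the TODO plans to make the
  // new overload pure virtual once downstream code has been updated.
  return 0;
}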

@@ -185,8 +185,7 @@ class ChannelSend : public ChannelSendInterface,
                    uint8_t payloadType,
                    uint32_t timeStamp,
                    const uint8_t* payloadData,
-                   size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payloadSize) override;

   void OnUplinkPacketLossRate(float packet_loss_rate);
   bool InputMute() const;
@@ -196,15 +195,13 @@ class ChannelSend : public ChannelSendInterface,
   int32_t SendRtpAudio(AudioFrameType frameType,
                        uint8_t payloadType,
                        uint32_t timeStamp,
-                       rtc::ArrayView<const uint8_t> payload,
-                       const RTPFragmentationHeader* fragmentation)
+                       rtc::ArrayView<const uint8_t> payload)
       RTC_RUN_ON(encoder_queue_);

   int32_t SendMediaTransportAudio(AudioFrameType frameType,
                                   uint8_t payloadType,
                                   uint32_t timeStamp,
-                                  rtc::ArrayView<const uint8_t> payload,
-                                  const RTPFragmentationHeader* fragmentation)
+                                  rtc::ArrayView<const uint8_t> payload)
       RTC_RUN_ON(encoder_queue_);

   // Return media transport or nullptr if using RTP.
@@ -477,8 +474,7 @@ int32_t ChannelSend::SendData(AudioFrameType frameType,
                               uint8_t payloadType,
                               uint32_t timeStamp,
                               const uint8_t* payloadData,
-                              size_t payloadSize,
-                              const RTPFragmentationHeader* fragmentation) {
+                              size_t payloadSize) {
   RTC_DCHECK_RUN_ON(&encoder_queue_);
   rtc::ArrayView<const uint8_t> payload(payloadData, payloadSize);

@@ -489,19 +485,16 @@ int32_t ChannelSend::SendData(AudioFrameType frameType,
       return 0;
     }

-    return SendMediaTransportAudio(frameType, payloadType, timeStamp, payload,
-                                   fragmentation);
+    return SendMediaTransportAudio(frameType, payloadType, timeStamp, payload);
   } else {
-    return SendRtpAudio(frameType, payloadType, timeStamp, payload,
-                        fragmentation);
+    return SendRtpAudio(frameType, payloadType, timeStamp, payload);
   }
 }

 int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType,
                                   uint8_t payloadType,
                                   uint32_t timeStamp,
-                                  rtc::ArrayView<const uint8_t> payload,
-                                  const RTPFragmentationHeader* fragmentation) {
+                                  rtc::ArrayView<const uint8_t> payload) {
   if (_includeAudioLevelIndication) {
     // Store current audio level in the RTP sender.
     // The level will be used in combination with voice-activity state
@@ -572,8 +565,7 @@ int32_t ChannelSend::SendMediaTransportAudio(
     AudioFrameType frameType,
     uint8_t payloadType,
     uint32_t timeStamp,
-    rtc::ArrayView<const uint8_t> payload,
-    const RTPFragmentationHeader* fragmentation) {
+    rtc::ArrayView<const uint8_t> payload) {
   // TODO(nisse): Use null _transportPtr for MediaTransport.
   // RTC_DCHECK(_transportPtr == nullptr);
   uint64_t channel_id;


@@ -107,8 +107,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override {
+                   size_t payload_len_bytes) override {
     if (frame_type == AudioFrameType::kEmptyFrame)
       return 0;


@@ -122,13 +122,11 @@ std::unique_ptr<Packet> AcmSendTestOldApi::NextPacket() {
 }

 // This method receives the callback from ACM when a new packet is produced.
-int32_t AcmSendTestOldApi::SendData(
-    AudioFrameType frame_type,
+int32_t AcmSendTestOldApi::SendData(AudioFrameType frame_type,
                                     uint8_t payload_type,
                                     uint32_t timestamp,
                                     const uint8_t* payload_data,
-                                    size_t payload_len_bytes,
-                                    const RTPFragmentationHeader* fragmentation) {
+                                    size_t payload_len_bytes) {
   // Store the packet locally.
   frame_type_ = frame_type;
   payload_type_ = payload_type;


@@ -54,8 +54,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payload_len_bytes) override;

   AudioCodingModule* acm() { return acm_.get(); }


@@ -282,28 +282,6 @@ int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
   return 0;
 }

-void ConvertEncodedInfoToFragmentationHeader(
-    const AudioEncoder::EncodedInfo& info,
-    RTPFragmentationHeader* frag) {
-  if (info.redundant.empty()) {
-    frag->fragmentationVectorSize = 0;
-    return;
-  }
-
-  frag->VerifyAndAllocateFragmentationHeader(
-      static_cast<uint16_t>(info.redundant.size()));
-  frag->fragmentationVectorSize = static_cast<uint16_t>(info.redundant.size());
-  size_t offset = 0;
-  for (size_t i = 0; i < info.redundant.size(); ++i) {
-    frag->fragmentationOffset[i] = offset;
-    offset += info.redundant[i].encoded_bytes;
-    frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
-    frag->fragmentationTimeDiff[i] = rtc::dchecked_cast<uint16_t>(
-        info.encoded_timestamp - info.redundant[i].encoded_timestamp);
-    frag->fragmentationPlType[i] = info.redundant[i].payload_type;
-  }
-}
-
 void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
   if (value != last_value_ || first_time_) {
     first_time_ = false;
@@ -391,8 +369,6 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
     }
   }

-  RTPFragmentationHeader my_fragmentation;
-  ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   AudioFrameType frame_type;
   if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
     frame_type = AudioFrameType::kEmptyFrame;
@@ -408,9 +384,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
     if (packetization_callback_) {
       packetization_callback_->SendData(
           frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
-          encode_buffer_.data(), encode_buffer_.size(),
-          my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
-                                                       : nullptr);
+          encode_buffer_.data(), encode_buffer_.size());
     }

     if (vad_callback_) {


@@ -108,8 +108,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override {
+                   size_t payload_len_bytes) override {
     rtc::CritScope lock(&crit_sect_);
     ++num_calls_;
     last_frame_type_ = frame_type;


@@ -40,12 +40,27 @@ class AudioPacketizationCallback {
  public:
   virtual ~AudioPacketizationCallback() {}

+  virtual int32_t SendData(AudioFrameType frame_type,
+                           uint8_t payload_type,
+                           uint32_t timestamp,
+                           const uint8_t* payload_data,
+                           size_t payload_len_bytes) {
+    return SendData(frame_type, payload_type, timestamp, payload_data,
+                    payload_len_bytes, nullptr);
+  }
+
+  // TODO(bugs.webrtc.org/6471) Deprecated, delete as soon as downstream
+  // implementations are updated. Then make above method pure virtual, and
+  // delete forward declaration of RTPFragmentationHeader.
   virtual int32_t SendData(AudioFrameType frame_type,
                            uint8_t payload_type,
                            uint32_t timestamp,
                            const uint8_t* payload_data,
                            size_t payload_len_bytes,
-                           const RTPFragmentationHeader* fragmentation) = 0;
+                           const RTPFragmentationHeader* fragmentation) {
+    return SendData(frame_type, payload_type, timestamp, payload_data,
+                    payload_len_bytes);
+  }
 };

 // Callback class used for reporting VAD decision


@@ -111,9 +111,7 @@ class Packetizer : public AudioPacketizationCallback {
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override {
-    RTC_CHECK(!fragmentation);
+                   size_t payload_len_bytes) override {
     if (payload_len_bytes == 0) {
       return 0;
     }


@@ -22,8 +22,7 @@ int32_t Channel::SendData(AudioFrameType frameType,
                           uint8_t payloadType,
                           uint32_t timeStamp,
                           const uint8_t* payloadData,
-                          size_t payloadSize,
-                          const RTPFragmentationHeader* fragmentation) {
+                          size_t payloadSize) {
   RTPHeader rtp_header;
   int32_t status;
   size_t payloadDataSize = payloadSize;
@@ -46,37 +45,6 @@ int32_t Channel::SendData(AudioFrameType frameType,
     return 0;
   }

-  // Treat fragmentation separately
-  if (fragmentation != NULL) {
-    // If silence for too long, send only new data.
-    if ((fragmentation->fragmentationVectorSize == 2) &&
-        (fragmentation->fragmentationTimeDiff[1] <= 0x3fff)) {
-      // only 0x80 if we have multiple blocks
-      _payloadData[0] = 0x80 + fragmentation->fragmentationPlType[1];
-      size_t REDheader = (fragmentation->fragmentationTimeDiff[1] << 10) +
-                         fragmentation->fragmentationLength[1];
-      _payloadData[1] = uint8_t((REDheader >> 16) & 0x000000FF);
-      _payloadData[2] = uint8_t((REDheader >> 8) & 0x000000FF);
-      _payloadData[3] = uint8_t(REDheader & 0x000000FF);
-      _payloadData[4] = fragmentation->fragmentationPlType[0];
-      // copy the RED data
-      memcpy(_payloadData + 5,
-             payloadData + fragmentation->fragmentationOffset[1],
-             fragmentation->fragmentationLength[1]);
-      // copy the normal data
-      memcpy(_payloadData + 5 + fragmentation->fragmentationLength[1],
-             payloadData + fragmentation->fragmentationOffset[0],
-             fragmentation->fragmentationLength[0]);
-      payloadDataSize += 5;
-    } else {
-      // single block (newest one)
-      memcpy(_payloadData, payloadData + fragmentation->fragmentationOffset[0],
-             fragmentation->fragmentationLength[0]);
-      payloadDataSize = fragmentation->fragmentationLength[0];
-      rtp_header.payloadType = fragmentation->fragmentationPlType[0];
-    }
-  } else {
   memcpy(_payloadData, payloadData, payloadDataSize);
   if (_isStereo) {
     if (_leftChannel) {
@@ -87,7 +55,6 @@ int32_t Channel::SendData(AudioFrameType frameType,
       _leftChannel = true;
     }
   }
-  }

   _channelCritSect.Enter();
   if (_saveBitStream) {


@@ -51,8 +51,7 @@ class Channel : public AudioPacketizationCallback {
                    uint8_t payloadType,
                    uint32_t timeStamp,
                    const uint8_t* payloadData,
-                   size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payloadSize) override;

   void RegisterReceiverACM(AudioCodingModule* acm);


@@ -32,13 +32,11 @@ TestPacketization::TestPacketization(RTPStream *rtpStream, uint16_t frequency)
 TestPacketization::~TestPacketization() {
 }

-int32_t TestPacketization::SendData(
-    const AudioFrameType /* frameType */,
+int32_t TestPacketization::SendData(const AudioFrameType /* frameType */,
                                     const uint8_t payloadType,
                                     const uint32_t timeStamp,
                                     const uint8_t* payloadData,
-                                    const size_t payloadSize,
-                                    const RTPFragmentationHeader* /* fragmentation */) {
+                                    const size_t payloadSize) {
   _rtpStream->Write(payloadType, timeStamp, _seqNo++, payloadData, payloadSize,
                     _frequency);
   return 1;


@@ -32,8 +32,7 @@ class TestPacketization : public AudioPacketizationCallback {
                    const uint8_t payloadType,
                    const uint32_t timeStamp,
                    const uint8_t* payloadData,
-                   const size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   const size_t payloadSize) override;

  private:
   static void MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType,


@@ -64,8 +64,7 @@ int32_t TestPack::SendData(AudioFrameType frame_type,
                            uint8_t payload_type,
                            uint32_t timestamp,
                            const uint8_t* payload_data,
-                           size_t payload_size,
-                           const RTPFragmentationHeader* fragmentation) {
+                           size_t payload_size) {
   RTPHeader rtp_header;
   int32_t status;


@@ -29,8 +29,7 @@ class TestPack : public AudioPacketizationCallback {
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_size,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payload_size) override;

   size_t payload_size();
   uint32_t timestamp_diff();


@@ -44,8 +44,7 @@ int32_t TestPackStereo::SendData(const AudioFrameType frame_type,
                                  const uint8_t payload_type,
                                  const uint32_t timestamp,
                                  const uint8_t* payload_data,
-                                 const size_t payload_size,
-                                 const RTPFragmentationHeader* fragmentation) {
+                                 const size_t payload_size) {
   RTPHeader rtp_header;
   int32_t status = 0;


@@ -35,8 +35,7 @@ class TestPackStereo : public AudioPacketizationCallback {
                    const uint8_t payload_type,
                    const uint32_t timestamp,
                    const uint8_t* payload_data,
-                   const size_t payload_size,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   const size_t payload_size) override;

   uint16_t payload_size();
   uint32_t timestamp_diff();


@@ -316,7 +316,7 @@ void OpusTest::Run(TestPackStereo* channel,

     // Send data to the channel. "channel" will handle the loss simulation.
     channel->SendData(AudioFrameType::kAudioFrameSpeech, payload_type_,
-                      rtp_timestamp_, bitstream, bitstream_len_byte, NULL);
+                      rtp_timestamp_, bitstream, bitstream_len_byte);
     if (first_packet) {
       first_packet = false;
       start_time_stamp = rtp_timestamp_;