Deprecate RTPFragmentationHeader argument to AudioPacketizationCallback::SendData
It appears unused everywhere. It will be deleted in a follow-up CL.

Bug: webrtc:6471
Change-Id: Ief992db6e52aee3cf1bc77ffd659ffbc072672ba
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/134212
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27787}
commit c35b6e675a
parent e670fd9795
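For downstream code that implements AudioPacketizationCallback, the intended migration is to replace an override of the old six-argument SendData with the new five-argument overload before the deprecated one is removed. Below is a minimal sketch of a migrated implementation; the class name SketchPacketizationSink and its body are illustrative only, and the include path assumes the callback is still declared in modules/audio_coding/include/audio_coding_module.h.

#include <cstddef>
#include <cstdint>

#include "modules/audio_coding/include/audio_coding_module.h"

// Illustrative subclass (not part of this CL): it overrides only the new
// five-argument SendData, so it keeps compiling after the deprecated
// fragmentation overload is deleted.
class SketchPacketizationSink : public webrtc::AudioPacketizationCallback {
 public:
  int32_t SendData(webrtc::AudioFrameType frame_type,
                   uint8_t payload_type,
                   uint32_t timestamp,
                   const uint8_t* payload_data,
                   size_t payload_len_bytes) override {
    // Packetize |payload_data| and hand it to the transport here.
    return 0;
  }
};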
@@ -185,8 +185,7 @@ class ChannelSend : public ChannelSendInterface,
                    uint8_t payloadType,
                    uint32_t timeStamp,
                    const uint8_t* payloadData,
-                   size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payloadSize) override;

   void OnUplinkPacketLossRate(float packet_loss_rate);
   bool InputMute() const;
@@ -196,15 +195,13 @@ class ChannelSend : public ChannelSendInterface,
   int32_t SendRtpAudio(AudioFrameType frameType,
                        uint8_t payloadType,
                        uint32_t timeStamp,
-                       rtc::ArrayView<const uint8_t> payload,
-                       const RTPFragmentationHeader* fragmentation)
+                       rtc::ArrayView<const uint8_t> payload)
       RTC_RUN_ON(encoder_queue_);

   int32_t SendMediaTransportAudio(AudioFrameType frameType,
                                   uint8_t payloadType,
                                   uint32_t timeStamp,
-                                  rtc::ArrayView<const uint8_t> payload,
-                                  const RTPFragmentationHeader* fragmentation)
+                                  rtc::ArrayView<const uint8_t> payload)
       RTC_RUN_ON(encoder_queue_);

   // Return media transport or nullptr if using RTP.
@@ -477,8 +474,7 @@ int32_t ChannelSend::SendData(AudioFrameType frameType,
                               uint8_t payloadType,
                               uint32_t timeStamp,
                               const uint8_t* payloadData,
-                              size_t payloadSize,
-                              const RTPFragmentationHeader* fragmentation) {
+                              size_t payloadSize) {
   RTC_DCHECK_RUN_ON(&encoder_queue_);
   rtc::ArrayView<const uint8_t> payload(payloadData, payloadSize);

@@ -489,19 +485,16 @@ int32_t ChannelSend::SendData(AudioFrameType frameType,
       return 0;
     }

-    return SendMediaTransportAudio(frameType, payloadType, timeStamp, payload,
-                                   fragmentation);
+    return SendMediaTransportAudio(frameType, payloadType, timeStamp, payload);
   } else {
-    return SendRtpAudio(frameType, payloadType, timeStamp, payload,
-                        fragmentation);
+    return SendRtpAudio(frameType, payloadType, timeStamp, payload);
   }
 }

 int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType,
                                   uint8_t payloadType,
                                   uint32_t timeStamp,
-                                  rtc::ArrayView<const uint8_t> payload,
-                                  const RTPFragmentationHeader* fragmentation) {
+                                  rtc::ArrayView<const uint8_t> payload) {
   if (_includeAudioLevelIndication) {
     // Store current audio level in the RTP sender.
     // The level will be used in combination with voice-activity state
@@ -572,8 +565,7 @@ int32_t ChannelSend::SendMediaTransportAudio(
     AudioFrameType frameType,
     uint8_t payloadType,
     uint32_t timeStamp,
-    rtc::ArrayView<const uint8_t> payload,
-    const RTPFragmentationHeader* fragmentation) {
+    rtc::ArrayView<const uint8_t> payload) {
   // TODO(nisse): Use null _transportPtr for MediaTransport.
   // RTC_DCHECK(_transportPtr == nullptr);
   uint64_t channel_id;
@@ -107,8 +107,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override {
+                   size_t payload_len_bytes) override {
     if (frame_type == AudioFrameType::kEmptyFrame)
       return 0;

@@ -122,13 +122,11 @@ std::unique_ptr<Packet> AcmSendTestOldApi::NextPacket() {
 }

 // This method receives the callback from ACM when a new packet is produced.
-int32_t AcmSendTestOldApi::SendData(
-    AudioFrameType frame_type,
-    uint8_t payload_type,
-    uint32_t timestamp,
-    const uint8_t* payload_data,
-    size_t payload_len_bytes,
-    const RTPFragmentationHeader* fragmentation) {
+int32_t AcmSendTestOldApi::SendData(AudioFrameType frame_type,
+                                    uint8_t payload_type,
+                                    uint32_t timestamp,
+                                    const uint8_t* payload_data,
+                                    size_t payload_len_bytes) {
   // Store the packet locally.
   frame_type_ = frame_type;
   payload_type_ = payload_type;
@@ -54,8 +54,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payload_len_bytes) override;

   AudioCodingModule* acm() { return acm_.get(); }

@@ -282,28 +282,6 @@ int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
   return 0;
 }

-void ConvertEncodedInfoToFragmentationHeader(
-    const AudioEncoder::EncodedInfo& info,
-    RTPFragmentationHeader* frag) {
-  if (info.redundant.empty()) {
-    frag->fragmentationVectorSize = 0;
-    return;
-  }
-
-  frag->VerifyAndAllocateFragmentationHeader(
-      static_cast<uint16_t>(info.redundant.size()));
-  frag->fragmentationVectorSize = static_cast<uint16_t>(info.redundant.size());
-  size_t offset = 0;
-  for (size_t i = 0; i < info.redundant.size(); ++i) {
-    frag->fragmentationOffset[i] = offset;
-    offset += info.redundant[i].encoded_bytes;
-    frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
-    frag->fragmentationTimeDiff[i] = rtc::dchecked_cast<uint16_t>(
-        info.encoded_timestamp - info.redundant[i].encoded_timestamp);
-    frag->fragmentationPlType[i] = info.redundant[i].payload_type;
-  }
-}
-
 void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
   if (value != last_value_ || first_time_) {
     first_time_ = false;
@@ -391,8 +369,6 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
     }
   }

-  RTPFragmentationHeader my_fragmentation;
-  ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   AudioFrameType frame_type;
   if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
     frame_type = AudioFrameType::kEmptyFrame;
@@ -408,9 +384,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   if (packetization_callback_) {
     packetization_callback_->SendData(
         frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
-        encode_buffer_.data(), encode_buffer_.size(),
-        my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
-                                                     : nullptr);
+        encode_buffer_.data(), encode_buffer_.size());
   }

   if (vad_callback_) {
@@ -108,8 +108,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override {
+                   size_t payload_len_bytes) override {
     rtc::CritScope lock(&crit_sect_);
     ++num_calls_;
     last_frame_type_ = frame_type;
@@ -40,12 +40,27 @@ class AudioPacketizationCallback {
  public:
   virtual ~AudioPacketizationCallback() {}

+  virtual int32_t SendData(AudioFrameType frame_type,
+                           uint8_t payload_type,
+                           uint32_t timestamp,
+                           const uint8_t* payload_data,
+                           size_t payload_len_bytes) {
+    return SendData(frame_type, payload_type, timestamp, payload_data,
+                    payload_len_bytes, nullptr);
+  }
+
+  // TODO(bugs.webrtc.org/6471) Deprecated, delete as soon as downstream
+  // implementations are updated. Then make above method pure virtual, and
+  // delete forward declaration of RTPFragmentationHeader.
   virtual int32_t SendData(AudioFrameType frame_type,
                            uint8_t payload_type,
                            uint32_t timestamp,
                            const uint8_t* payload_data,
                            size_t payload_len_bytes,
-                           const RTPFragmentationHeader* fragmentation) = 0;
+                           const RTPFragmentationHeader* fragmentation) {
+    return SendData(frame_type, payload_type, timestamp, payload_data,
+                    payload_len_bytes);
+  }
 };

 // Callback class used for reporting VAD decision
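Note the shape of the transition in the hunk above: each SendData overload now has a default implementation that forwards to the other, so an existing subclass that still overrides the six-argument version keeps working when callers such as AudioCodingModuleImpl::Encode, which now passes only five arguments, invoke the new overload through the base interface. The flip side is that a subclass must override at least one of the two signatures; a class overriding neither would just bounce between the two defaults. A toy sketch of the dispatch follows; the class name LegacySink and its member are hypothetical, only the two SendData signatures come from this CL.

#include <cstddef>
#include <cstdint>

#include "modules/audio_coding/include/audio_coding_module.h"

// Hypothetical not-yet-migrated implementation that still overrides the
// deprecated six-argument overload.
class LegacySink : public webrtc::AudioPacketizationCallback {
 public:
  int32_t SendData(webrtc::AudioFrameType frame_type,
                   uint8_t payload_type,
                   uint32_t timestamp,
                   const uint8_t* payload_data,
                   size_t payload_len_bytes,
                   const webrtc::RTPFragmentationHeader* fragmentation) override {
    saw_null_fragmentation = (fragmentation == nullptr);
    return 0;
  }
  bool saw_null_fragmentation = false;
};

// Usage, called through the base interface as the ACM does: the five-argument
// default appends nullptr and virtual dispatch lands in the override above.
//   LegacySink sink;
//   webrtc::AudioPacketizationCallback* cb = &sink;
//   cb->SendData(frame_type, payload_type, timestamp, data, size);
//   // sink.saw_null_fragmentation is now true.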
@@ -111,9 +111,7 @@ class Packetizer : public AudioPacketizationCallback {
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_len_bytes,
-                   const RTPFragmentationHeader* fragmentation) override {
-    RTC_CHECK(!fragmentation);
+                   size_t payload_len_bytes) override {
     if (payload_len_bytes == 0) {
       return 0;
     }
@@ -22,8 +22,7 @@ int32_t Channel::SendData(AudioFrameType frameType,
                           uint8_t payloadType,
                           uint32_t timeStamp,
                           const uint8_t* payloadData,
-                          size_t payloadSize,
-                          const RTPFragmentationHeader* fragmentation) {
+                          size_t payloadSize) {
   RTPHeader rtp_header;
   int32_t status;
   size_t payloadDataSize = payloadSize;
@@ -46,46 +45,14 @@ int32_t Channel::SendData(AudioFrameType frameType,
     return 0;
   }

-  // Treat fragmentation separately
-  if (fragmentation != NULL) {
-    // If silence for too long, send only new data.
-    if ((fragmentation->fragmentationVectorSize == 2) &&
-        (fragmentation->fragmentationTimeDiff[1] <= 0x3fff)) {
-      // only 0x80 if we have multiple blocks
-      _payloadData[0] = 0x80 + fragmentation->fragmentationPlType[1];
-      size_t REDheader = (fragmentation->fragmentationTimeDiff[1] << 10) +
-                         fragmentation->fragmentationLength[1];
-      _payloadData[1] = uint8_t((REDheader >> 16) & 0x000000FF);
-      _payloadData[2] = uint8_t((REDheader >> 8) & 0x000000FF);
-      _payloadData[3] = uint8_t(REDheader & 0x000000FF);
-
-      _payloadData[4] = fragmentation->fragmentationPlType[0];
-      // copy the RED data
-      memcpy(_payloadData + 5,
-             payloadData + fragmentation->fragmentationOffset[1],
-             fragmentation->fragmentationLength[1]);
-      // copy the normal data
-      memcpy(_payloadData + 5 + fragmentation->fragmentationLength[1],
-             payloadData + fragmentation->fragmentationOffset[0],
-             fragmentation->fragmentationLength[0]);
-      payloadDataSize += 5;
+  memcpy(_payloadData, payloadData, payloadDataSize);
+  if (_isStereo) {
+    if (_leftChannel) {
+      _rtp_header = rtp_header;
+      _leftChannel = false;
     } else {
-      // single block (newest one)
-      memcpy(_payloadData, payloadData + fragmentation->fragmentationOffset[0],
-             fragmentation->fragmentationLength[0]);
-      payloadDataSize = fragmentation->fragmentationLength[0];
-      rtp_header.payloadType = fragmentation->fragmentationPlType[0];
-    }
-  } else {
-    memcpy(_payloadData, payloadData, payloadDataSize);
-    if (_isStereo) {
-      if (_leftChannel) {
-        _rtp_header = rtp_header;
-        _leftChannel = false;
-      } else {
-        rtp_header = _rtp_header;
-        _leftChannel = true;
-      }
+      rtp_header = _rtp_header;
+      _leftChannel = true;
     }
   }

@@ -51,8 +51,7 @@ class Channel : public AudioPacketizationCallback {
                    uint8_t payloadType,
                    uint32_t timeStamp,
                    const uint8_t* payloadData,
-                   size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payloadSize) override;

   void RegisterReceiverACM(AudioCodingModule* acm);

@@ -32,13 +32,11 @@ TestPacketization::TestPacketization(RTPStream *rtpStream, uint16_t frequency)
 TestPacketization::~TestPacketization() {
 }

-int32_t TestPacketization::SendData(
-    const AudioFrameType /* frameType */,
-    const uint8_t payloadType,
-    const uint32_t timeStamp,
-    const uint8_t* payloadData,
-    const size_t payloadSize,
-    const RTPFragmentationHeader* /* fragmentation */) {
+int32_t TestPacketization::SendData(const AudioFrameType /* frameType */,
+                                    const uint8_t payloadType,
+                                    const uint32_t timeStamp,
+                                    const uint8_t* payloadData,
+                                    const size_t payloadSize) {
   _rtpStream->Write(payloadType, timeStamp, _seqNo++, payloadData, payloadSize,
                     _frequency);
   return 1;
@@ -32,8 +32,7 @@ class TestPacketization : public AudioPacketizationCallback {
                    const uint8_t payloadType,
                    const uint32_t timeStamp,
                    const uint8_t* payloadData,
-                   const size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   const size_t payloadSize) override;

  private:
   static void MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType,
@@ -64,8 +64,7 @@ int32_t TestPack::SendData(AudioFrameType frame_type,
                            uint8_t payload_type,
                            uint32_t timestamp,
                            const uint8_t* payload_data,
-                           size_t payload_size,
-                           const RTPFragmentationHeader* fragmentation) {
+                           size_t payload_size) {
   RTPHeader rtp_header;
   int32_t status;

@@ -29,8 +29,7 @@ class TestPack : public AudioPacketizationCallback {
                    uint8_t payload_type,
                    uint32_t timestamp,
                    const uint8_t* payload_data,
-                   size_t payload_size,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   size_t payload_size) override;

   size_t payload_size();
   uint32_t timestamp_diff();
@@ -44,8 +44,7 @@ int32_t TestPackStereo::SendData(const AudioFrameType frame_type,
                                  const uint8_t payload_type,
                                  const uint32_t timestamp,
                                  const uint8_t* payload_data,
-                                 const size_t payload_size,
-                                 const RTPFragmentationHeader* fragmentation) {
+                                 const size_t payload_size) {
   RTPHeader rtp_header;
   int32_t status = 0;

@@ -35,8 +35,7 @@ class TestPackStereo : public AudioPacketizationCallback {
                    const uint8_t payload_type,
                    const uint32_t timestamp,
                    const uint8_t* payload_data,
-                   const size_t payload_size,
-                   const RTPFragmentationHeader* fragmentation) override;
+                   const size_t payload_size) override;

   uint16_t payload_size();
   uint32_t timestamp_diff();
@@ -316,7 +316,7 @@ void OpusTest::Run(TestPackStereo* channel,

     // Send data to the channel. "channel" will handle the loss simulation.
     channel->SendData(AudioFrameType::kAudioFrameSpeech, payload_type_,
-                      rtp_timestamp_, bitstream, bitstream_len_byte, NULL);
+                      rtp_timestamp_, bitstream, bitstream_len_byte);
     if (first_packet) {
       first_packet = false;
       start_time_stamp = rtp_timestamp_;