henrik.lundin 6d8e011b64 Change NetEq::GetAudio to use AudioFrame
With this change, NetEq now uses AudioFrame as output type, like the
surrounding functions in ACM and VoiceEngine already do.

The computational savings are probably slim, since one memcpy is
removed while another is added (both in AcmReceiver::GetAudio).

More simplifications and clean-up will be done in
AcmReceiver::GetAudio in future CLs.

BUG=webrtc:5607

Review URL: https://codereview.webrtc.org/1750353002

Cr-Commit-Position: refs/heads/master@{#11874}
2016-03-04 18:34:26 +00:00


/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <algorithm>  // Access to min.

#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"

namespace webrtc {

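// Returns the number of samples that have not yet been read out, i.e., the
// samples at or beyond |next_index_|.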
size_t SyncBuffer::FutureLength() const {
  return Size() - next_index_;
}

void SyncBuffer::PushBack(const AudioMultiVector& append_this) {
  size_t samples_added = append_this.Size();
  AudioMultiVector::PushBack(append_this);
  AudioMultiVector::PopFront(samples_added);
  if (samples_added <= next_index_) {
    next_index_ -= samples_added;
  } else {
    // This means that we are pushing out future data that was never used.
    // assert(false);
    // TODO(hlundin): This assert must be disabled to support 60 ms frames.
    // This should not happen even for 60 ms frames, but it does. Investigate
    // why.
    next_index_ = 0;
  }
  dtmf_index_ -= std::min(dtmf_index_, samples_added);
}

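// Prepends |length| zeros to each channel. The buffer size stays constant,
// so the same number of samples falls off the back (see InsertZerosAtIndex).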
void SyncBuffer::PushFrontZeros(size_t length) {
  InsertZerosAtIndex(length, 0);
}

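// Inserts |length| zeros into each channel at |position|, popping the same
// number of samples off the back so the total size is unchanged. Read indices
// at or after |position| are shifted so they keep pointing at the same
// samples.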
void SyncBuffer::InsertZerosAtIndex(size_t length, size_t position) {
  position = std::min(position, Size());
  length = std::min(length, Size() - position);
  AudioMultiVector::PopBack(length);
  for (size_t channel = 0; channel < Channels(); ++channel) {
    channels_[channel]->InsertZerosAt(length, position);
  }
  if (next_index_ >= position) {
    // We are moving the |next_index_| sample.
    set_next_index(next_index_ + length);  // Overflow handled by subfunction.
  }
  if (dtmf_index_ > 0 && dtmf_index_ >= position) {
    // We are moving the |dtmf_index_| sample.
    set_dtmf_index(dtmf_index_ + length);  // Overflow handled by subfunction.
  }
}

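// Overwrites |length| samples per channel, starting at |position|, with the
// corresponding samples from |insert_this|. Neither the buffer size nor the
// read indices change.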
void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
                                size_t length,
                                size_t position) {
  position = std::min(position, Size());  // Cap |position| in the valid range.
  length = std::min(length, Size() - position);
  AudioMultiVector::OverwriteAt(insert_this, length, position);
}

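// Convenience overload covering all of |insert_this|.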
void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
                                size_t position) {
  ReplaceAtIndex(insert_this, insert_this.Size(), position);
}

void SyncBuffer::GetNextAudioInterleaved(size_t requested_len,
                                         AudioFrame* output) {
  RTC_DCHECK(output);
  const size_t samples_to_read = std::min(FutureLength(), requested_len);
  output->Reset();
  const size_t tot_samples_read =
      ReadInterleavedFromIndex(next_index_, samples_to_read, output->data_);
  const size_t samples_read_per_channel = tot_samples_read / Channels();
  next_index_ += samples_read_per_channel;
  output->interleaved_ = true;
  output->num_channels_ = Channels();
  output->samples_per_channel_ = samples_read_per_channel;
}

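// Advances the timestamp corresponding to the end of the buffer by
// |increment|.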
void SyncBuffer::IncreaseEndTimestamp(uint32_t increment) {
  end_timestamp_ += increment;
}

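// Zero-fills the buffer and marks all of it as already read: |next_index_| is
// parked at Size(), so FutureLength() becomes zero. The end timestamp and
// DTMF index are reset as well.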
void SyncBuffer::Flush() {
  Zeros(Size());
  next_index_ = Size();
  end_timestamp_ = 0;
  dtmf_index_ = 0;
}

void SyncBuffer::set_next_index(size_t value) {
  // Cannot set |next_index_| larger than the size of the buffer.
  next_index_ = std::min(value, Size());
}

void SyncBuffer::set_dtmf_index(size_t value) {
  // Cannot set |dtmf_index_| larger than the size of the buffer.
  dtmf_index_ = std::min(value, Size());
}

}  // namespace webrtc