Pass audio to AudioEncoder::Encode() in an ArrayView

Instead of in separate pointer and size arguments.

Review URL: https://codereview.webrtc.org/1418423010

Cr-Commit-Position: refs/heads/master@{#10535}
This commit is contained in:
kwiberg 2015-11-06 01:21:35 -08:00 committed by Commit bot
parent 1a4e9d7572
commit 288886b2ec
32 changed files with 183 additions and 142 deletions

View File

@ -61,6 +61,7 @@ class ArrayView final {
// is const, because the ArrayView doesn't own the array. (To prevent
// mutation, use ArrayView<const T>.)
size_t size() const { return size_; }
bool empty() const { return size_ == 0; }
T* data() const { return data_; }
T& operator[](size_t idx) const {
RTC_DCHECK_LT(idx, size_);
@ -72,6 +73,15 @@ class ArrayView final {
const T* cbegin() const { return data_; }
const T* cend() const { return data_ + size_; }
// Comparing two ArrayViews compares their (pointer,size) pairs; it does
// *not* dereference the pointers.
friend bool operator==(const ArrayView& a, const ArrayView& b) {
return a.data_ == b.data_ && a.size_ == b.size_;
}
friend bool operator!=(const ArrayView& a, const ArrayView& b) {
return !(a == b);
}
private:
// Invariant: !data_ iff size_ == 0.
void CheckInvariant() const { RTC_DCHECK_EQ(!data_, size_ == 0); }

View File

@ -214,4 +214,20 @@ TEST(ArrayViewTest, TestIteration) {
}
}
// Verifies ArrayView::empty(): true for a default-constructed view
// (null data, size 0), false for a view constructed over a non-empty array.
TEST(ArrayViewTest, TestEmpty) {
EXPECT_TRUE(ArrayView<int>().empty());
const int a[] = {1, 2, 3};
EXPECT_FALSE(ArrayView<const int>(a).empty());
}
// Verifies that ArrayView equality compares the (pointer, size) pair and
// does *not* dereference: two arrays with identical contents (a vs. b)
// compare unequal because their addresses differ, and two views over the
// same array with different sizes also compare unequal.
TEST(ArrayViewTest, TestCompare) {
int a[] = {1, 2, 3};
int b[] = {1, 2, 3};
EXPECT_EQ(ArrayView<int>(a), ArrayView<int>(a));
EXPECT_EQ(ArrayView<int>(), ArrayView<int>());
EXPECT_NE(ArrayView<int>(a), ArrayView<int>(b));
EXPECT_NE(ArrayView<int>(a), ArrayView<int>());
EXPECT_NE(ArrayView<int>(a), ArrayView<int>(a, 2));
}
} // namespace rtc

View File

@ -21,13 +21,13 @@ int AudioEncoder::RtpTimestampRateHz() const {
return SampleRateHz();
}
AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
const int16_t* audio,
size_t num_samples_per_channel,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_EQ(num_samples_per_channel,
static_cast<size_t>(SampleRateHz() / 100));
AudioEncoder::EncodedInfo AudioEncoder::Encode(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_EQ(audio.size(),
static_cast<size_t>(NumChannels() * SampleRateHz() / 100));
EncodedInfo info =
EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
RTC_CHECK_LE(info.encoded_bytes, max_encoded_bytes);

View File

@ -14,6 +14,7 @@
#include <algorithm>
#include <vector>
#include "webrtc/base/array_view.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@ -91,13 +92,12 @@ class AudioEncoder {
// Encode() checks some preconditions, calls EncodeInternal() which does the
// actual work, and then checks some postconditions.
EncodedInfo Encode(uint32_t rtp_timestamp,
const int16_t* audio,
size_t num_samples_per_channel,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded);
virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) = 0;

View File

@ -97,7 +97,7 @@ int AudioEncoderCng::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_GE(max_encoded_bytes,
@ -106,9 +106,8 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
RTC_CHECK_EQ(speech_buffer_.size(),
rtp_timestamps_.size() * samples_per_10ms_frame);
rtp_timestamps_.push_back(rtp_timestamp);
for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
speech_buffer_.push_back(audio[i]);
}
RTC_DCHECK_EQ(samples_per_10ms_frame, audio.size());
speech_buffer_.insert(speech_buffer_.end(), audio.cbegin(), audio.cend());
const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
if (rtp_timestamps_.size() < frames_to_encode) {
return EncodedInfo();
@ -242,9 +241,12 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
AudioEncoder::EncodedInfo info;
for (size_t i = 0; i < frames_to_encode; ++i) {
info = speech_encoder_->Encode(
rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes, encoded);
info =
speech_encoder_->Encode(rtp_timestamps_.front(),
rtc::ArrayView<const int16_t>(
&speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame),
max_encoded_bytes, encoded);
if (i + 1 == frames_to_encode) {
RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
} else {

View File

@ -75,8 +75,10 @@ class AudioEncoderCngTest : public ::testing::Test {
void Encode() {
ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
encoded_.size(), &encoded_[0]);
encoded_info_ = cng_->Encode(
timestamp_,
rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms_),
encoded_.size(), &encoded_[0]);
timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
}

View File

@ -57,7 +57,7 @@ class AudioEncoderCng final : public AudioEncoder {
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;

View File

@ -88,16 +88,13 @@ int AudioEncoderPcm::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
const int num_samples = SampleRateHz() / 100 * NumChannels();
if (speech_buffer_.empty()) {
first_timestamp_in_buffer_ = rtp_timestamp;
}
for (int i = 0; i < num_samples; ++i) {
speech_buffer_.push_back(audio[i]);
}
speech_buffer_.insert(speech_buffer_.end(), audio.begin(), audio.end());
if (speech_buffer_.size() < full_frame_samples_) {
return EncodedInfo();
}

View File

@ -42,7 +42,7 @@ class AudioEncoderPcm : public AudioEncoder {
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;

View File

@ -93,7 +93,7 @@ int AudioEncoderG722::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());

View File

@ -42,7 +42,7 @@ class AudioEncoderG722 final : public AudioEncoder {
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;

View File

@ -10,7 +10,7 @@
#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h"
#include <cstring>
#include <algorithm>
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/common_types.h"
@ -91,7 +91,7 @@ int AudioEncoderIlbc::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
@ -101,9 +101,9 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
first_timestamp_in_buffer_ = rtp_timestamp;
// Buffer input.
std::memcpy(input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_,
audio,
kSampleRateHz / 100 * sizeof(audio[0]));
RTC_DCHECK_EQ(static_cast<size_t>(kSampleRateHz / 100), audio.size());
std::copy(audio.cbegin(), audio.cend(),
input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_);
// If we don't yet have enough buffered input for a whole packet, we're done
// for now.

View File

@ -41,7 +41,7 @@ class AudioEncoderIlbc final : public AudioEncoder {
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;

View File

@ -61,7 +61,7 @@ class AudioEncoderIsacT final : public AudioEncoder {
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;

View File

@ -115,7 +115,7 @@ int AudioEncoderIsacT<T>::GetTargetBitrate() const {
template <typename T>
AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (!packet_in_progress_) {
@ -127,7 +127,7 @@ AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
IsacBandwidthInfo bwinfo = bwinfo_->Get();
T::SetBandwidthInfo(isac_state_, &bwinfo);
}
int r = T::Encode(isac_state_, audio, encoded);
int r = T::Encode(isac_state_, audio.data(), encoded);
RTC_CHECK_GE(r, 0) << "Encode failed (error code "
<< T::GetErrorCode(isac_state_) << ")";

View File

@ -32,7 +32,7 @@ class MockAudioEncoder final : public AudioEncoder {
// Note, we explicitly chose not to create a mock for the Encode method.
MOCK_METHOD4(EncodeInternal,
EncodedInfo(uint32_t timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded));
MOCK_METHOD0(Reset, void());

View File

@ -132,13 +132,13 @@ int AudioEncoderOpus::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (input_buffer_.empty())
first_timestamp_in_buffer_ = rtp_timestamp;
input_buffer_.insert(input_buffer_.end(), audio,
audio + SamplesPer10msFrame());
RTC_DCHECK_EQ(static_cast<size_t>(SamplesPer10msFrame()), audio.size());
input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
if (input_buffer_.size() <
(static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame())) {
return EncodedInfo();

View File

@ -62,7 +62,7 @@ class AudioEncoderOpus final : public AudioEncoder {
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;

View File

@ -10,6 +10,7 @@
#include <string>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/opus/opus_inst.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
@ -44,8 +45,7 @@ class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
void PrepareSpeechData(int channel, int block_length_ms, int loop_length_ms);
int EncodeDecode(WebRtcOpusEncInst* encoder,
const int16_t* input_audio,
size_t input_samples,
rtc::ArrayView<const int16_t> input_audio,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type);
@ -96,13 +96,14 @@ void OpusTest::SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
}
int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
const int16_t* input_audio,
size_t input_samples,
rtc::ArrayView<const int16_t> input_audio,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type) {
int encoded_bytes_int = WebRtcOpus_Encode(encoder, input_audio, input_samples,
kMaxBytes, bitstream_);
int encoded_bytes_int = WebRtcOpus_Encode(
encoder, input_audio.data(),
rtc::CheckedDivExact(input_audio.size(), static_cast<size_t>(channels_)),
kMaxBytes, bitstream_);
EXPECT_GE(encoded_bytes_int, 0);
encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
int est_len = WebRtcOpus_DurationEst(decoder, bitstream_, encoded_bytes_);
@ -129,8 +130,7 @@ void OpusTest::TestDtxEffect(bool dtx) {
channels_ == 1 ? 32000 : 64000));
// Set input audio as silence.
int16_t* silence = new int16_t[kOpus20msFrameSamples * channels_];
memset(silence, 0, sizeof(int16_t) * kOpus20msFrameSamples * channels_);
std::vector<int16_t> silence(kOpus20msFrameSamples * channels_, 0);
// Setting DTX.
EXPECT_EQ(0, dtx ? WebRtcOpus_EnableDtx(opus_encoder_) :
@ -142,9 +142,8 @@ void OpusTest::TestDtxEffect(bool dtx) {
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, speech_data_.GetNextBlock(),
kOpus20msFrameSamples, opus_decoder_, output_data_decode,
&audio_type)));
opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
output_data_decode, &audio_type)));
// If not DTX, it should never enter DTX mode. If DTX, we do not care since
// whether it enters DTX depends on the signal type.
if (!dtx) {
@ -158,10 +157,9 @@ void OpusTest::TestDtxEffect(bool dtx) {
// We input some silent segments. In DTX mode, the encoder will stop sending.
// However, DTX may happen after a while.
for (int i = 0; i < 30; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
output_data_decode, &audio_type)));
EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, opus_decoder_,
output_data_decode, &audio_type)));
if (!dtx) {
EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
@ -183,9 +181,9 @@ void OpusTest::TestDtxEffect(bool dtx) {
// DTX mode is maintained 19 frames.
for (int i = 0; i < 19; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, kOpus20msFrameSamples,
opus_decoder_, output_data_decode, &audio_type)));
static_cast<size_t>(
EncodeDecode(opus_encoder_, silence, opus_decoder_,
output_data_decode, &audio_type)));
if (dtx) {
EXPECT_EQ(0U, encoded_bytes_) // Send 0 byte.
<< "Opus should have entered DTX mode.";
@ -201,10 +199,9 @@ void OpusTest::TestDtxEffect(bool dtx) {
}
// Quit DTX after 19 frames.
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
output_data_decode, &audio_type)));
EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, opus_decoder_,
output_data_decode, &audio_type)));
EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
@ -212,10 +209,9 @@ void OpusTest::TestDtxEffect(bool dtx) {
EXPECT_EQ(0, audio_type); // Speech.
// Enters DTX again immediately.
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
output_data_decode, &audio_type)));
EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, opus_decoder_,
output_data_decode, &audio_type)));
if (dtx) {
EXPECT_EQ(1U, encoded_bytes_); // Send 1 byte.
EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
@ -232,10 +228,9 @@ void OpusTest::TestDtxEffect(bool dtx) {
silence[0] = 10000;
if (dtx) {
// Verify that encoder/decoder can jump out from DTX mode.
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
output_data_decode, &audio_type)));
EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
opus_encoder_, silence, opus_decoder_,
output_data_decode, &audio_type)));
EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
@ -244,7 +239,6 @@ void OpusTest::TestDtxEffect(bool dtx) {
// Free memory.
delete[] output_data_decode;
delete[] silence;
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
}
@ -314,10 +308,9 @@ TEST_P(OpusTest, OpusEncodeDecode) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, speech_data_.GetNextBlock(),
kOpus20msFrameSamples, opus_decoder_, output_data_decode,
&audio_type)));
static_cast<size_t>(
EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
opus_decoder_, output_data_decode, &audio_type)));
// Free memory.
delete[] output_data_decode;
@ -374,10 +367,9 @@ TEST_P(OpusTest, OpusDecodeInit) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, speech_data_.GetNextBlock(),
kOpus20msFrameSamples, opus_decoder_, output_data_decode,
&audio_type)));
static_cast<size_t>(
EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
opus_decoder_, output_data_decode, &audio_type)));
WebRtcOpus_DecoderInit(opus_decoder_);
@ -513,10 +505,9 @@ TEST_P(OpusTest, OpusDecodePlc) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(EncodeDecode(
opus_encoder_, speech_data_.GetNextBlock(),
kOpus20msFrameSamples, opus_decoder_, output_data_decode,
&audio_type)));
static_cast<size_t>(
EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
opus_decoder_, output_data_decode, &audio_type)));
// Call decoder PLC.
int16_t* plc_buffer = new int16_t[kOpus20msFrameSamples * channels_];
@ -542,10 +533,12 @@ TEST_P(OpusTest, OpusDurationEstimation) {
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
// 10 ms. We use only first 10 ms of a 20 ms block.
int encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
speech_data_.GetNextBlock(),
kOpus10msFrameSamples,
kMaxBytes, bitstream_);
auto speech_block = speech_data_.GetNextBlock();
int encoded_bytes_int = WebRtcOpus_Encode(
opus_encoder_, speech_block.data(),
rtc::CheckedDivExact(speech_block.size(),
2 * static_cast<size_t>(channels_)),
kMaxBytes, bitstream_);
EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus10msFrameSamples,
static_cast<size_t>(WebRtcOpus_DurationEst(
@ -553,10 +546,11 @@ TEST_P(OpusTest, OpusDurationEstimation) {
static_cast<size_t>(encoded_bytes_int))));
// 20 ms
encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
speech_data_.GetNextBlock(),
kOpus20msFrameSamples,
kMaxBytes, bitstream_);
speech_block = speech_data_.GetNextBlock();
encoded_bytes_int = WebRtcOpus_Encode(
opus_encoder_, speech_block.data(),
rtc::CheckedDivExact(speech_block.size(), static_cast<size_t>(channels_)),
kMaxBytes, bitstream_);
EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(WebRtcOpus_DurationEst(
@ -594,10 +588,12 @@ TEST_P(OpusTest, OpusDecodeRepacketized) {
OpusRepacketizer* rp = opus_repacketizer_create();
for (int idx = 0; idx < kPackets; idx++) {
encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
speech_data_.GetNextBlock(),
kOpus20msFrameSamples, kMaxBytes,
bitstream_);
auto speech_block = speech_data_.GetNextBlock();
encoded_bytes_ =
WebRtcOpus_Encode(opus_encoder_, speech_block.data(),
rtc::CheckedDivExact(speech_block.size(),
static_cast<size_t>(channels_)),
kMaxBytes, bitstream_);
EXPECT_EQ(OPUS_OK, opus_repacketizer_cat(rp, bitstream_, encoded_bytes_));
}

View File

@ -54,12 +54,11 @@ int AudioEncoderCopyRed::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
EncodedInfo info = speech_encoder_->Encode(
rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
max_encoded_bytes, encoded);
EncodedInfo info =
speech_encoder_->Encode(rtp_timestamp, audio, max_encoded_bytes, encoded);
RTC_CHECK_GE(max_encoded_bytes,
info.encoded_bytes + secondary_info_.encoded_bytes);
RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";

View File

@ -44,7 +44,7 @@ class AudioEncoderCopyRed final : public AudioEncoder {
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;

View File

@ -60,8 +60,10 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
void Encode() {
ASSERT_TRUE(red_.get() != NULL);
encoded_info_ = red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
encoded_.size(), &encoded_[0]);
encoded_info_ = red_->Encode(
timestamp_,
rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms),
encoded_.size(), &encoded_[0]);
timestamp_ += num_audio_samples_10ms;
}
@ -83,7 +85,7 @@ class MockEncodeHelper {
}
AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
const int16_t* audio,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (write_payload_) {

View File

@ -149,7 +149,9 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
encoded_info = audio_encoder->Encode(
rtp_timestamp, input_data.audio, input_data.length_per_channel,
rtp_timestamp, rtc::ArrayView<const int16_t>(
input_data.audio, input_data.audio_channel *
input_data.length_per_channel),
encode_buffer_.size(), encode_buffer_.data());
encode_buffer_.SetSize(encoded_info.encoded_bytes);
bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000);

View File

@ -656,7 +656,11 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
}
void InsertAudio() {
memcpy(input_frame_.data_, audio_loop_.GetNextBlock(), kNumSamples10ms);
// TODO(kwiberg): Use std::copy here. Might be complications because AFAICS
// this call confuses the number of samples with the number of bytes, and
// ends up copying only half of what it should.
memcpy(input_frame_.data_, audio_loop_.GetNextBlock().data(),
kNumSamples10ms);
AudioCodingModuleTestOldApi::InsertAudio();
}
@ -774,9 +778,9 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
// Encode new frame.
uint32_t input_timestamp = rtp_header_.header.timestamp;
while (info.encoded_bytes == 0) {
info = isac_encoder_->Encode(
input_timestamp, audio_loop_.GetNextBlock(), kNumSamples10ms,
max_encoded_bytes, encoded.get());
info =
isac_encoder_->Encode(input_timestamp, audio_loop_.GetNextBlock(),
max_encoded_bytes, encoded.get());
input_timestamp += 160; // 10 ms at 16 kHz.
}
EXPECT_EQ(rtp_header_.header.timestamp + kPacketSizeSamples,

View File

@ -46,8 +46,8 @@ class CodecOwnerTest : public ::testing::Test {
int expected_send_even_if_empty) {
uint8_t out[kPacketSizeSamples];
AudioEncoder::EncodedInfo encoded_info;
encoded_info = codec_owner_.Encoder()->Encode(
timestamp_, kZeroData, kDataLengthSamples, kPacketSizeSamples, out);
encoded_info = codec_owner_.Encoder()->Encode(timestamp_, kZeroData,
kPacketSizeSamples, out);
timestamp_ += kDataLengthSamples;
EXPECT_TRUE(encoded_info.redundant.empty());
EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
@ -146,24 +146,26 @@ TEST_F(CodecOwnerTest, ExternalEncoder) {
AudioEncoder::EncodedInfo info;
EXPECT_CALL(external_encoder, SampleRateHz())
.WillRepeatedly(Return(kSampleRateHz));
EXPECT_CALL(external_encoder, NumChannels()).WillRepeatedly(Return(1));
{
InSequence s;
info.encoded_timestamp = 0;
EXPECT_CALL(external_encoder,
EncodeInternal(0, audio, arraysize(encoded), encoded))
EncodeInternal(0, rtc::ArrayView<const int16_t>(audio),
arraysize(encoded), encoded))
.WillOnce(Return(info));
EXPECT_CALL(external_encoder, Mark("A"));
EXPECT_CALL(external_encoder, Mark("B"));
info.encoded_timestamp = 2;
EXPECT_CALL(external_encoder,
EncodeInternal(2, audio, arraysize(encoded), encoded))
EncodeInternal(2, rtc::ArrayView<const int16_t>(audio),
arraysize(encoded), encoded))
.WillOnce(Return(info));
EXPECT_CALL(external_encoder, Die());
}
info = codec_owner_.Encoder()->Encode(0, audio, arraysize(audio),
arraysize(encoded), encoded);
info = codec_owner_.Encoder()->Encode(0, audio, arraysize(encoded), encoded);
EXPECT_EQ(0u, info.encoded_timestamp);
external_encoder.Mark("A");
@ -172,14 +174,12 @@ TEST_F(CodecOwnerTest, ExternalEncoder) {
codec_inst.pacsize = kPacketSizeSamples;
ASSERT_TRUE(codec_owner_.SetEncoders(codec_inst, -1, VADNormal, -1));
// Don't expect any more calls to the external encoder.
info = codec_owner_.Encoder()->Encode(1, audio, arraysize(audio),
arraysize(encoded), encoded);
info = codec_owner_.Encoder()->Encode(1, audio, arraysize(encoded), encoded);
external_encoder.Mark("B");
// Change back to external encoder again.
codec_owner_.SetEncoders(&external_encoder, -1, VADNormal, -1);
info = codec_owner_.Encoder()->Encode(2, audio, arraysize(audio),
arraysize(encoded), encoded);
info = codec_owner_.Encoder()->Encode(2, audio, arraysize(encoded), encoded);
EXPECT_EQ(2u, info.encoded_timestamp);
}

View File

@ -158,7 +158,10 @@ class AudioDecoderTest : public ::testing::Test {
interleaved_input.get());
encoded_info_ = audio_encoder_->Encode(
0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100,
0, rtc::ArrayView<const int16_t>(interleaved_input.get(),
audio_encoder_->NumChannels() *
audio_encoder_->SampleRateHz() /
100),
data_length_ * 2, output);
}
EXPECT_EQ(payload_type_, encoded_info_.payload_type);

View File

@ -939,8 +939,10 @@ class NetEqBgnTest : public NetEqDecodingTest {
uint32_t receive_timestamp = 0;
for (int n = 0; n < 10; ++n) { // Insert few packets and get audio.
size_t enc_len_bytes = WebRtcPcm16b_Encode(
input.GetNextBlock(), expected_samples_per_channel, payload);
auto block = input.GetNextBlock();
ASSERT_EQ(expected_samples_per_channel, block.size());
size_t enc_len_bytes =
WebRtcPcm16b_Encode(block.data(), block.size(), payload);
ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
number_channels = 0;

View File

@ -66,8 +66,10 @@ class NetEqIlbcQualityTest : public NetEqQualityTest {
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
info = encoder_->Encode(dummy_timestamp, &in_data[encoded_samples],
kFrameSizeSamples, max_bytes, payload);
info = encoder_->Encode(dummy_timestamp,
rtc::ArrayView<const int16_t>(
in_data + encoded_samples, kFrameSizeSamples),
max_bytes, payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);

View File

@ -66,8 +66,10 @@ class NetEqPcmuQualityTest : public NetEqQualityTest {
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
info = encoder_->Encode(dummy_timestamp, &in_data[encoded_samples],
kFrameSizeSamples, max_bytes, payload);
info = encoder_->Encode(dummy_timestamp,
rtc::ArrayView<const int16_t>(
in_data + encoded_samples, kFrameSizeSamples),
max_bytes, payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);

View File

@ -43,13 +43,14 @@ bool AudioLoop::Init(const std::string file_name,
return true;
}
const int16_t* AudioLoop::GetNextBlock() {
rtc::ArrayView<const int16_t> AudioLoop::GetNextBlock() {
// Check that the AudioLoop is initialized.
if (block_length_samples_ == 0) return NULL;
if (block_length_samples_ == 0)
return rtc::ArrayView<const int16_t>();
const int16_t* output_ptr = &audio_array_[next_index_];
next_index_ = (next_index_ + block_length_samples_) % loop_length_samples_;
return output_ptr;
return rtc::ArrayView<const int16_t>(output_ptr, block_length_samples_);
}

View File

@ -13,6 +13,7 @@
#include <string>
#include "webrtc/base/array_view.h"
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"
@ -40,10 +41,9 @@ class AudioLoop {
bool Init(const std::string file_name, size_t max_loop_length_samples,
size_t block_length_samples);
// Returns a pointer to the next block of audio. The number given as
// |block_length_samples| to the Init() function determines how many samples
// that can be safely read from the pointer.
const int16_t* GetNextBlock();
// Returns a (pointer,size) pair for the next block of audio. The size is
// equal to the |block_length_samples| Init() argument.
rtc::ArrayView<const int16_t> GetNextBlock();
private:
size_t next_index_;

View File

@ -62,11 +62,12 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
bool drift_flipped = false;
int32_t packet_input_time_ms =
rtp_gen.GetRtpHeader(kPayloadType, kInputBlockSizeSamples, &rtp_header);
const int16_t* input_samples = audio_loop.GetNextBlock();
if (!input_samples) exit(1);
auto input_samples = audio_loop.GetNextBlock();
if (input_samples.empty())
exit(1);
uint8_t input_payload[kInputBlockSizeSamples * sizeof(int16_t)];
size_t payload_len =
WebRtcPcm16b_Encode(input_samples, kInputBlockSizeSamples, input_payload);
size_t payload_len = WebRtcPcm16b_Encode(input_samples.data(),
input_samples.size(), input_payload);
assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
// Main loop.
@ -93,10 +94,10 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
kInputBlockSizeSamples,
&rtp_header);
input_samples = audio_loop.GetNextBlock();
if (!input_samples) return -1;
payload_len = WebRtcPcm16b_Encode(const_cast<int16_t*>(input_samples),
kInputBlockSizeSamples,
input_payload);
if (input_samples.empty())
return -1;
payload_len = WebRtcPcm16b_Encode(input_samples.data(),
input_samples.size(), input_payload);
assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
}