Changed AudioEncoder::Encode to take an rtc::Buffer* instead of uint8_t* and a maximum size.

For backwards compatibility, I've kept the old interface to
Encode() and EncodeInternal and created default implementations of both
variants of EncodeInternal(), each calling the other. At least one of
the variants must be implemented in a subclass or we'll run out of stack
and explode. Would be nice if we could catch that before runtime. :/

The new interface to EncodeInternal() is protected, since it should
never be called from the outside.

Was unable to mark the old EncodeInternal() as RTC_DEPRECATED, since the
default implementation of the new variant needs to call it to work around
old implementations. The old Encode() variant is deprecated, at least.

Added a test for backwards compatibility in audio_encoder_unittest.cc.
For the added test I broke out MockEncodeHelper from
audio_encoder_copy_red_unittest.cc and renamed it MockAudioEncoderHelper.

Review URL: https://codereview.webrtc.org/1725143003

Cr-Commit-Position: refs/heads/master@{#11823}
This commit is contained in:
ossu 2016-03-01 00:41:31 -08:00 committed by Commit bot
parent 22c2b4814a
commit 10a029e952
33 changed files with 638 additions and 316 deletions

View File

@ -145,13 +145,14 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
last_rtp_timestamp_ = rtp_timestamp;
first_frame_ = false;
encode_buffer_.SetSize(encoder_stack_->MaxEncodedBytes());
// Clear the buffer before reuse - encoded data will get appended.
encode_buffer_.Clear();
encoded_info = encoder_stack_->Encode(
rtp_timestamp, rtc::ArrayView<const int16_t>(
input_data.audio, input_data.audio_channel *
input_data.length_per_channel),
encode_buffer_.size(), encode_buffer_.data());
encode_buffer_.SetSize(encoded_info.encoded_bytes);
&encode_buffer_);
bitrate_logger_.MaybeLog(encoder_stack_->GetTargetBitrate() / 1000);
if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
// Not enough data.

View File

@ -774,8 +774,7 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
bool CbReceiveImpl() {
SleepMs(1);
const size_t max_encoded_bytes = isac_encoder_->MaxEncodedBytes();
std::unique_ptr<uint8_t[]> encoded(new uint8_t[max_encoded_bytes]);
rtc::Buffer encoded;
AudioEncoder::EncodedInfo info;
{
rtc::CritScope lock(&crit_sect_);
@ -790,7 +789,7 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
while (info.encoded_bytes == 0) {
info =
isac_encoder_->Encode(input_timestamp, audio_loop_.GetNextBlock(),
max_encoded_bytes, encoded.get());
&encoded);
input_timestamp += 160; // 10 ms at 16 kHz.
}
EXPECT_EQ(rtp_header_.header.timestamp + kPacketSizeSamples,
@ -801,7 +800,7 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
// Now we're not holding the crit sect when calling ACM.
// Insert into ACM.
EXPECT_EQ(0, acm_->IncomingPacket(encoded.get(), info.encoded_bytes,
EXPECT_EQ(0, acm_->IncomingPacket(encoded.data(), info.encoded_bytes,
rtp_header_));
// Pull audio.
@ -1633,9 +1632,6 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
MockAudioEncoder mock_encoder;
// Set expectations on the mock encoder and also delegate the calls to the
// real encoder.
EXPECT_CALL(mock_encoder, MaxEncodedBytes())
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::MaxEncodedBytes));
EXPECT_CALL(mock_encoder, SampleRateHz())
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SampleRateHz));
@ -1652,9 +1648,14 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
EXPECT_CALL(mock_encoder, GetTargetBitrate())
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));
EXPECT_CALL(mock_encoder, EncodeInternal(_, _, _, _))
EXPECT_CALL(mock_encoder, EncodeInternal(_, _, _))
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::EncodeInternal));
.WillRepeatedly(Invoke(&encoder,
static_cast<
AudioEncoder::EncodedInfo(AudioEncoder::*)(
uint32_t,
rtc::ArrayView<const int16_t>,
rtc::Buffer*)>(&AudioEncoderPcmU::Encode)));
EXPECT_CALL(mock_encoder, SetFec(_))
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SetFec));

View File

@ -42,10 +42,10 @@ class RentACodecTestF : public ::testing::Test {
uint32_t expected_timestamp,
int expected_payload_type,
int expected_send_even_if_empty) {
uint8_t out[kPacketSizeSamples];
rtc::Buffer out;
AudioEncoder::EncodedInfo encoded_info;
encoded_info =
encoder_->Encode(timestamp_, kZeroData, kPacketSizeSamples, out);
encoder_->Encode(timestamp_, kZeroData, &out);
timestamp_ += kDataLengthSamples;
EXPECT_TRUE(encoded_info.redundant.empty());
EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
@ -115,7 +115,7 @@ TEST(RentACodecTest, ExternalEncoder) {
EXPECT_EQ(&external_encoder, rac.RentEncoderStack(&param));
const int kPacketSizeSamples = kSampleRateHz / 100;
int16_t audio[kPacketSizeSamples] = {0};
uint8_t encoded[kPacketSizeSamples];
rtc::Buffer encoded;
AudioEncoder::EncodedInfo info;
{
@ -123,19 +123,19 @@ TEST(RentACodecTest, ExternalEncoder) {
info.encoded_timestamp = 0;
EXPECT_CALL(external_encoder,
EncodeInternal(0, rtc::ArrayView<const int16_t>(audio),
arraysize(encoded), encoded))
&encoded))
.WillOnce(Return(info));
EXPECT_CALL(external_encoder, Mark("A"));
EXPECT_CALL(external_encoder, Mark("B"));
info.encoded_timestamp = 2;
EXPECT_CALL(external_encoder,
EncodeInternal(2, rtc::ArrayView<const int16_t>(audio),
arraysize(encoded), encoded))
&encoded))
.WillOnce(Return(info));
EXPECT_CALL(external_encoder, Die());
}
info = external_encoder.Encode(0, audio, arraysize(encoded), encoded);
info = external_encoder.Encode(0, audio, &encoded);
EXPECT_EQ(0u, info.encoded_timestamp);
external_encoder.Mark("A");
@ -147,13 +147,13 @@ TEST(RentACodecTest, ExternalEncoder) {
EXPECT_EQ(param.speech_encoder, rac.RentEncoderStack(&param));
// Don't expect any more calls to the external encoder.
info = param.speech_encoder->Encode(1, audio, arraysize(encoded), encoded);
info = param.speech_encoder->Encode(1, audio, &encoded);
external_encoder.Mark("B");
// Change back to external encoder again.
param.speech_encoder = &external_encoder;
EXPECT_EQ(&external_encoder, rac.RentEncoderStack(&param));
info = external_encoder.Encode(2, audio, arraysize(encoded), encoded);
info = external_encoder.Encode(2, audio, &encoded);
EXPECT_EQ(2u, info.encoded_timestamp);
}

View File

@ -24,6 +24,28 @@ int AudioEncoder::RtpTimestampRateHz() const {
}
// Encodes one 10 ms block of |audio|, appending the output to |encoded|.
// Wraps EncodeInternal() with pre- and post-condition checks: the input must
// be exactly 10 ms of sample-interleaved audio, and the byte count the
// subclass reports in EncodedInfo must match what it actually appended.
AudioEncoder::EncodedInfo AudioEncoder::Encode(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) {
TRACE_EVENT0("webrtc", "AudioEncoder::Encode");
// Precondition: exactly SampleRateHz() / 100 samples per channel (10 ms).
RTC_CHECK_EQ(audio.size(),
static_cast<size_t>(NumChannels() * SampleRateHz() / 100));
const size_t old_size = encoded->size();
EncodedInfo info = EncodeInternal(rtp_timestamp, audio, encoded);
// Postcondition: the reported byte count matches the bytes appended.
RTC_CHECK_EQ(encoded->size() - old_size, info.encoded_bytes);
return info;
}
// Deprecated raw-pointer overload of Encode(); forwards to
// DEPRECATED_Encode() so existing callers keep working while they migrate to
// the rtc::Buffer* interface (bug 5591).
AudioEncoder::EncodedInfo AudioEncoder::Encode(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
return DEPRECATED_Encode(rtp_timestamp, audio, max_encoded_bytes, encoded);
}
AudioEncoder::EncodedInfo AudioEncoder::DEPRECATED_Encode(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
@ -37,6 +59,33 @@ AudioEncoder::EncodedInfo AudioEncoder::Encode(
return info;
}
// Default implementation of the new (rtc::Buffer*) EncodeInternal(): reserves
// MaxEncodedBytes() of space in |encoded| and delegates to the old
// raw-pointer EncodeInternal(). A subclass must override at least one of the
// two EncodeInternal() variants, otherwise these two defaults call each
// other forever and overflow the stack.
AudioEncoder::EncodedInfo AudioEncoder::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded)
{
EncodedInfo info;
// AppendData with a writer lambda: encode into the reserved region and
// return the number of bytes actually produced so the buffer is trimmed
// to the real output size.
encoded->AppendData(MaxEncodedBytes(), [&] (rtc::ArrayView<uint8_t> encoded) {
info = EncodeInternal(rtp_timestamp, audio,
encoded.size(), encoded.data());
return info.encoded_bytes;
});
return info;
}
// Default implementation of the old (raw-pointer) EncodeInternal(): encodes
// into a temporary rtc::Buffer via the new variant, then copies the result
// out. Incurs one extra copy; exists only for backwards compatibility while
// subclasses migrate (bug 5591).
AudioEncoder::EncodedInfo AudioEncoder::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded)
{
rtc::Buffer temp_buffer;
EncodedInfo info = EncodeInternal(rtp_timestamp, audio, &temp_buffer);
// The caller guarantees |encoded| can hold the output; verify in debug.
RTC_DCHECK_LE(temp_buffer.size(), max_encoded_bytes);
std::memcpy(encoded, temp_buffer.data(), info.encoded_bytes);
return info;
}
// Default FEC control: FEC is unsupported, so the request succeeds only when
// it asks to disable FEC (enable == false).
bool AudioEncoder::SetFec(bool enable) {
return !enable;
}

View File

@ -15,6 +15,8 @@
#include <vector>
#include "webrtc/base/array_view.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/deprecation.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@ -85,21 +87,40 @@ class AudioEncoder {
// Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
// NumChannels() samples). Multi-channel audio must be sample-interleaved.
// The encoder produces zero or more bytes of output in |encoded| and
// returns additional encoding information.
// The caller is responsible for making sure that |max_encoded_bytes| is
// not smaller than the number of bytes actually produced by the encoder.
// Encode() checks some preconditions, calls EncodeInternal() which does the
// actual work, and then checks some postconditions.
// The encoder appends zero or more bytes of output to |encoded| and returns
// additional encoding information. Encode() checks some preconditions, calls
// EncodeInternal() which does the actual work, and then checks some
// postconditions.
EncodedInfo Encode(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded);
rtc::Buffer* encoded);
virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) = 0;
// Deprecated interface to Encode (remove eventually, bug 5591). May incur a
// copy. The encoder produces zero or more bytes of output in |encoded| and
// returns additional encoding information. The caller is responsible for
// making sure that |max_encoded_bytes| is not smaller than the number of
// bytes actually produced by the encoder.
RTC_DEPRECATED EncodedInfo Encode(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded);
EncodedInfo DEPRECATED_Encode(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded);
// Deprecated interface of EncodeInternal (also bug 5591). May incur a copy.
// Subclasses implement this to perform the actual encoding. Called by
// Encode(). By default, this is implemented as a call to the newer
// EncodeInternal() that accepts an rtc::Buffer instead of a raw pointer.
// That version is protected, so see below. At least one of the two
// interfaces of EncodeInternal _must_ be implemented by a subclass.
virtual EncodedInfo EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded);
// Resets the encoder to its starting state, discarding any input that has
// been fed to the encoder but not yet emitted in a packet.
@ -138,6 +159,16 @@ class AudioEncoder {
// encoder is free to adjust or disregard the given bitrate (the default
// implementation does the latter).
virtual void SetTargetBitrate(int target_bps);
protected:
// Subclasses implement this to perform the actual encoding. Called by
// Encode(). For compatibility reasons, this is implemented by default as a
// call to the older version of EncodeInternal(). At least one of the two
// interfaces of EncodeInternal _must_ be implemented by a subclass.
// Preferably this one.
virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
using ::testing::_;
using ::testing::Invoke;
using ::testing::Return;
namespace webrtc {
// Verifies that the default implementations of the two EncodeInternal()
// variants correctly redirect to each other: an encoder implementing only
// the old raw-pointer variant still works through the new Encode(), and an
// encoder implementing only the new rtc::Buffer* variant still works
// through the deprecated raw-pointer entry point.
TEST(AudioEncoderTest, EncodeInternalRedirectsOk) {
const size_t kPayloadSize = 16;
const uint8_t payload[kPayloadSize] =
{0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8,
0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0};
MockAudioEncoderDeprecated old_impl;
MockAudioEncoder new_impl;
MockAudioEncoderBase* impls[] = { &old_impl, &new_impl };
for (auto* impl : impls) {
EXPECT_CALL(*impl, Die());
// Report room for twice the payload, so the redirection code must trim
// the output down to the bytes actually produced.
EXPECT_CALL(*impl, MaxEncodedBytes())
.WillRepeatedly(Return(kPayloadSize * 2));
EXPECT_CALL(*impl, NumChannels()).WillRepeatedly(Return(1));
EXPECT_CALL(*impl, SampleRateHz()).WillRepeatedly(Return(8000));
}
// Each mock implements exactly one EncodeInternal() variant; both produce
// the same canned payload.
EXPECT_CALL(old_impl, EncodeInternal(_, _, _, _)).WillOnce(
Invoke(MockAudioEncoderDeprecated::CopyEncoding(payload)));
EXPECT_CALL(new_impl, EncodeInternal(_, _, _)).WillOnce(
Invoke(MockAudioEncoder::CopyEncoding(payload)));
int16_t audio[80];  // 10 ms of mono audio at 8 kHz.
uint8_t output_array[kPayloadSize * 2];
rtc::Buffer output_buffer;
AudioEncoder* old_encoder = &old_impl;
AudioEncoder* new_encoder = &new_impl;
// Cross-call: new-style Encode() against the old implementation, and the
// deprecated raw-pointer Encode() against the new implementation.
auto old_info = old_encoder->Encode(0, audio, &output_buffer);
auto new_info = new_encoder->DEPRECATED_Encode(0, audio,
kPayloadSize * 2,
output_array);
EXPECT_EQ(old_info.encoded_bytes, kPayloadSize);
EXPECT_EQ(new_info.encoded_bytes, kPayloadSize);
EXPECT_EQ(output_buffer.size(), kPayloadSize);
// Both paths must deliver the payload bytes unmodified.
for (size_t i = 0; i != kPayloadSize; ++i) {
EXPECT_EQ(output_buffer.data()[i], payload[i]);
EXPECT_EQ(output_array[i], payload[i]);
}
}
} // namespace webrtc

View File

@ -100,10 +100,7 @@ int AudioEncoderCng::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_GE(max_encoded_bytes,
static_cast<size_t>(num_cng_coefficients_ + 1));
rtc::Buffer* encoded) {
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
RTC_CHECK_EQ(speech_buffer_.size(),
rtp_timestamps_.size() * samples_per_10ms_frame);
@ -145,12 +142,12 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
EncodedInfo info;
switch (activity) {
case Vad::kPassive: {
info = EncodePassive(frames_to_encode, max_encoded_bytes, encoded);
info = EncodePassive(frames_to_encode, encoded);
last_frame_active_ = false;
break;
}
case Vad::kActive: {
info = EncodeActive(frames_to_encode, max_encoded_bytes, encoded);
info = EncodeActive(frames_to_encode, encoded);
last_frame_active_ = true;
break;
}
@ -204,31 +201,37 @@ void AudioEncoderCng::SetTargetBitrate(int bits_per_second) {
AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
rtc::Buffer* encoded) {
bool force_sid = last_frame_active_;
bool output_produced = false;
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
RTC_CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
const size_t bytes_to_encode = frames_to_encode * samples_per_10ms_frame;
AudioEncoder::EncodedInfo info;
for (size_t i = 0; i < frames_to_encode; ++i) {
// It's important not to pass &info.encoded_bytes directly to
// WebRtcCng_Encode(), since later loop iterations may return zero in that
// value, in which case we don't want to overwrite any value from an earlier
// iteration.
size_t encoded_bytes_tmp = 0;
RTC_CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
&speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, encoded,
&encoded_bytes_tmp, force_sid),
0);
if (encoded_bytes_tmp > 0) {
RTC_CHECK(!output_produced);
info.encoded_bytes = encoded_bytes_tmp;
output_produced = true;
force_sid = false;
}
}
encoded->AppendData(bytes_to_encode, [&] (rtc::ArrayView<uint8_t> encoded) {
for (size_t i = 0; i < frames_to_encode; ++i) {
// It's important not to pass &info.encoded_bytes directly to
// WebRtcCng_Encode(), since later loop iterations may return zero in
// that value, in which case we don't want to overwrite any value from
// an earlier iteration.
size_t encoded_bytes_tmp = 0;
RTC_CHECK_GE(
WebRtcCng_Encode(cng_inst_.get(),
&speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, encoded.data(),
&encoded_bytes_tmp, force_sid),
0);
if (encoded_bytes_tmp > 0) {
RTC_CHECK(!output_produced);
info.encoded_bytes = encoded_bytes_tmp;
output_produced = true;
force_sid = false;
}
}
return info.encoded_bytes;
});
info.encoded_timestamp = rtp_timestamps_.front();
info.payload_type = cng_payload_type_;
info.send_even_if_empty = true;
@ -238,8 +241,7 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
rtc::Buffer* encoded) {
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
AudioEncoder::EncodedInfo info;
for (size_t i = 0; i < frames_to_encode; ++i) {
@ -248,7 +250,7 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
rtc::ArrayView<const int16_t>(
&speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame),
max_encoded_bytes, encoded);
encoded);
if (i + 1 == frames_to_encode) {
RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
} else {

View File

@ -30,6 +30,8 @@ class Vad;
class AudioEncoderCng final : public AudioEncoder {
public:
using AudioEncoder::EncodeInternal;
struct Config {
bool IsOk() const;
@ -59,8 +61,7 @@ class AudioEncoderCng final : public AudioEncoder {
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
rtc::Buffer* encoded) override;
void Reset() override;
bool SetFec(bool enable) override;
bool SetDtx(bool enable) override;
@ -71,11 +72,9 @@ class AudioEncoderCng final : public AudioEncoder {
private:
EncodedInfo EncodePassive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
rtc::Buffer* encoded);
EncodedInfo EncodeActive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
rtc::Buffer* encoded);
size_t SamplesPer10msFrame() const;
AudioEncoder* speech_encoder_;

View File

@ -70,7 +70,6 @@ class AudioEncoderCngTest : public ::testing::Test {
EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
.WillRepeatedly(Return(kMockMaxEncodedBytes));
cng_.reset(new AudioEncoderCng(config_));
encoded_.resize(cng_->MaxEncodedBytes(), 0);
}
void Encode() {
@ -78,7 +77,7 @@ class AudioEncoderCngTest : public ::testing::Test {
encoded_info_ = cng_->Encode(
timestamp_,
rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms_),
encoded_.size(), &encoded_[0]);
&encoded_);
timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
}
@ -89,12 +88,13 @@ class AudioEncoderCngTest : public ::testing::Test {
InSequence s;
AudioEncoder::EncodedInfo info;
for (size_t j = 0; j < num_calls - 1; ++j) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Return(info));
}
info.encoded_bytes = kMockReturnEncodedBytes;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Invoke(
MockAudioEncoder::FakeEncoding(kMockReturnEncodedBytes)));
}
// Verifies that the cng_ object waits until it has collected
@ -108,7 +108,7 @@ class AudioEncoderCngTest : public ::testing::Test {
.WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
// Don't expect any calls to the encoder yet.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _)).Times(0);
for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
@ -191,7 +191,7 @@ class AudioEncoderCngTest : public ::testing::Test {
uint32_t timestamp_;
int16_t audio_[kMaxNumSamples];
size_t num_audio_samples_10ms_;
std::vector<uint8_t> encoded_;
rtc::Buffer encoded_;
AudioEncoder::EncodedInfo encoded_info_;
int sample_rate_hz_;
};
@ -259,7 +259,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillRepeatedly(Return(Vad::kPassive));
// Expect no calls at all to the speech encoder mock.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _)).Times(0);
uint32_t expected_timestamp = timestamp_;
for (size_t i = 0; i < 100; ++i) {
Encode();
@ -341,7 +341,7 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
// Verifies that the correct payload type is set when CNG is encoded.
TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
CreateCng();
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _)).Times(0);
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1U));
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
@ -373,9 +373,8 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
encoded_info_.payload_type = 0;
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kActive));
AudioEncoder::EncodedInfo info;
info.encoded_bytes = kMockReturnEncodedBytes;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).WillOnce(Return(info));
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _)).WillOnce(
Invoke(MockAudioEncoder::FakeEncoding(kMockReturnEncodedBytes)));
Encode();
EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);

View File

@ -80,8 +80,7 @@ int AudioEncoderPcm::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
rtc::Buffer* encoded) {
if (speech_buffer_.empty()) {
first_timestamp_in_buffer_ = rtp_timestamp;
}
@ -90,12 +89,16 @@ AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
return EncodedInfo();
}
RTC_CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
RTC_CHECK_GE(max_encoded_bytes, full_frame_samples_);
EncodedInfo info;
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
info.encoded_bytes =
EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
encoded->AppendData(MaxEncodedBytes(),
[&] (rtc::ArrayView<uint8_t> encoded) {
return EncodeCall(&speech_buffer_[0],
full_frame_samples_,
encoded.data());
});
speech_buffer_.clear();
return info;
}

View File

@ -20,6 +20,8 @@ namespace webrtc {
class AudioEncoderPcm : public AudioEncoder {
public:
using AudioEncoder::EncodeInternal;
struct Config {
public:
bool IsOk() const;
@ -41,15 +43,15 @@ class AudioEncoderPcm : public AudioEncoder {
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
protected:
AudioEncoderPcm(const Config& config, int sample_rate_hz);
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) override;
virtual size_t EncodeCall(const int16_t* audio,
size_t input_len,
uint8_t* encoded) = 0;

View File

@ -91,13 +91,16 @@ int AudioEncoderG722::GetTargetBitrate() const {
return static_cast<int>(64000 * NumChannels());
}
void AudioEncoderG722::Reset() {
num_10ms_frames_buffered_ = 0;
for (size_t i = 0; i < num_channels_; ++i)
RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
}
AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
rtc::Buffer* encoded) {
if (num_10ms_frames_buffered_ == 0)
first_timestamp_in_buffer_ = rtp_timestamp;
@ -117,38 +120,38 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
num_10ms_frames_buffered_ = 0;
const size_t samples_per_channel = SamplesPerChannel();
for (size_t i = 0; i < num_channels_; ++i) {
const size_t encoded = WebRtcG722_Encode(
const size_t bytes_encoded = WebRtcG722_Encode(
encoders_[i].encoder, encoders_[i].speech_buffer.get(),
samples_per_channel, encoders_[i].encoded_buffer.data());
RTC_CHECK_EQ(encoded, samples_per_channel / 2);
RTC_CHECK_EQ(bytes_encoded, samples_per_channel / 2);
}
// Interleave the encoded bytes of the different channels. Each separate
// channel and the interleaved stream encodes two samples per byte, most
// significant half first.
for (size_t i = 0; i < samples_per_channel / 2; ++i) {
for (size_t j = 0; j < num_channels_; ++j) {
uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
interleave_buffer_.data()[j] = two_samples >> 4;
interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
}
for (size_t j = 0; j < num_channels_; ++j)
encoded[i * num_channels_ + j] = interleave_buffer_.data()[2 * j] << 4 |
interleave_buffer_.data()[2 * j + 1];
}
const size_t bytes_to_encode = samples_per_channel / 2 * num_channels_;
EncodedInfo info;
info.encoded_bytes = samples_per_channel / 2 * num_channels_;
info.encoded_bytes = encoded->AppendData(
bytes_to_encode, [&] (rtc::ArrayView<uint8_t> encoded) {
// Interleave the encoded bytes of the different channels. Each separate
// channel and the interleaved stream encodes two samples per byte, most
// significant half first.
for (size_t i = 0; i < samples_per_channel / 2; ++i) {
for (size_t j = 0; j < num_channels_; ++j) {
uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
interleave_buffer_.data()[j] = two_samples >> 4;
interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
}
for (size_t j = 0; j < num_channels_; ++j)
encoded[i * num_channels_ + j] =
interleave_buffer_.data()[2 * j] << 4 |
interleave_buffer_.data()[2 * j + 1];
}
return bytes_to_encode;
});
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
return info;
}
void AudioEncoderG722::Reset() {
num_10ms_frames_buffered_ = 0;
for (size_t i = 0; i < num_channels_; ++i)
RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
}
AudioEncoderG722::EncoderState::EncoderState() {
RTC_CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
}

View File

@ -23,6 +23,8 @@ struct CodecInst;
class AudioEncoderG722 final : public AudioEncoder {
public:
using AudioEncoder::EncodeInternal;
struct Config {
bool IsOk() const;
@ -42,11 +44,12 @@ class AudioEncoderG722 final : public AudioEncoder {
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
void Reset() override;
protected:
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
rtc::Buffer* encoded) override;
private:
// The encoder state for one channel.

View File

@ -92,16 +92,13 @@ int AudioEncoderIlbc::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
rtc::Buffer* encoded) {
// Save timestamp if starting a new packet.
if (num_10ms_frames_buffered_ == 0)
first_timestamp_in_buffer_ = rtp_timestamp;
// Buffer input.
RTC_DCHECK_EQ(static_cast<size_t>(kSampleRateHz / 100), audio.size());
std::copy(audio.cbegin(), audio.cend(),
input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_);
@ -114,15 +111,24 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
// Encode buffered input.
RTC_DCHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
num_10ms_frames_buffered_ = 0;
const int output_len = WebRtcIlbcfix_Encode(
encoder_,
input_buffer_,
kSampleRateHz / 100 * num_10ms_frames_per_packet_,
encoded);
RTC_CHECK_GE(output_len, 0);
size_t encoded_bytes =
encoded->AppendData(
RequiredOutputSizeBytes(),
[&] (rtc::ArrayView<uint8_t> encoded) {
const int r = WebRtcIlbcfix_Encode(
encoder_,
input_buffer_,
kSampleRateHz / 100 * num_10ms_frames_per_packet_,
encoded.data());
RTC_CHECK_GE(r, 0);
return static_cast<size_t>(r);
});
RTC_DCHECK_EQ(encoded_bytes, RequiredOutputSizeBytes());
EncodedInfo info;
info.encoded_bytes = static_cast<size_t>(output_len);
RTC_DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
info.encoded_bytes = encoded_bytes;
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = config_.payload_type;
return info;

View File

@ -21,6 +21,8 @@ struct CodecInst;
class AudioEncoderIlbc final : public AudioEncoder {
public:
using AudioEncoder::EncodeInternal;
struct Config {
bool IsOk() const;
@ -42,8 +44,7 @@ class AudioEncoderIlbc final : public AudioEncoder {
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
rtc::Buffer* encoded) override;
void Reset() override;
private:

View File

@ -23,6 +23,8 @@ struct CodecInst;
template <typename T>
class AudioEncoderIsacT final : public AudioEncoder {
public:
using AudioEncoder::EncodeInternal;
// Allowed combinations of sample rate, frame size, and bit rate are
// - 16000 Hz, 30 ms, 10000-32000 bps
// - 16000 Hz, 60 ms, 10000-32000 bps
@ -62,8 +64,7 @@ class AudioEncoderIsacT final : public AudioEncoder {
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
rtc::Buffer* encoded) override;
void Reset() override;
private:

View File

@ -117,8 +117,7 @@ template <typename T>
AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
rtc::Buffer* encoded) {
if (!packet_in_progress_) {
// Starting a new packet; remember the timestamp for later.
packet_in_progress_ = true;
@ -128,22 +127,26 @@ AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
IsacBandwidthInfo bwinfo = bwinfo_->Get();
T::SetBandwidthInfo(isac_state_, &bwinfo);
}
int r = T::Encode(isac_state_, audio.data(), encoded);
RTC_CHECK_GE(r, 0) << "Encode failed (error code "
<< T::GetErrorCode(isac_state_) << ")";
// T::Encode doesn't allow us to tell it the size of the output
// buffer. All we can do is check for an overrun after the fact.
RTC_CHECK_LE(static_cast<size_t>(r), max_encoded_bytes);
size_t encoded_bytes = encoded->AppendData(
kSufficientEncodeBufferSizeBytes,
[&] (rtc::ArrayView<uint8_t> encoded) {
int r = T::Encode(isac_state_, audio.data(), encoded.data());
if (r == 0)
RTC_CHECK_GE(r, 0) << "Encode failed (error code "
<< T::GetErrorCode(isac_state_) << ")";
return static_cast<size_t>(r);
});
if (encoded_bytes == 0)
return EncodedInfo();
// Got enough input to produce a packet. Return the saved timestamp from
// the first chunk of input that went into the packet.
packet_in_progress_ = false;
EncodedInfo info;
info.encoded_bytes = r;
info.encoded_bytes = encoded_bytes;
info.encoded_timestamp = packet_timestamp_;
info.payload_type = config_.payload_type;
return info;

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
namespace webrtc {
MockAudioEncoder::FakeEncoding::FakeEncoding(
const AudioEncoder::EncodedInfo& info)
: info_(info) { }
MockAudioEncoder::FakeEncoding::FakeEncoding(size_t encoded_bytes) {
info_.encoded_bytes = encoded_bytes;
}
AudioEncoder::EncodedInfo MockAudioEncoder::FakeEncoding::operator()(
uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) {
encoded->SetSize(encoded->size() + info_.encoded_bytes);
return info_;
}
MockAudioEncoder::CopyEncoding::CopyEncoding(
AudioEncoder::EncodedInfo info,
rtc::ArrayView<const uint8_t> payload)
: info_(info), payload_(payload) { }
MockAudioEncoder::CopyEncoding::CopyEncoding(
rtc::ArrayView<const uint8_t> payload)
: payload_(payload) {
info_.encoded_bytes = payload_.size();
}
AudioEncoder::EncodedInfo MockAudioEncoder::CopyEncoding::operator()(
uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) {
RTC_CHECK(encoded);
RTC_CHECK_LE(info_.encoded_bytes, payload_.size());
encoded->AppendData(payload_.data(), info_.encoded_bytes);
return info_;
}
MockAudioEncoderDeprecated::CopyEncoding::CopyEncoding(
AudioEncoder::EncodedInfo info,
rtc::ArrayView<const uint8_t> payload)
: info_(info), payload_(payload) { }
MockAudioEncoderDeprecated::CopyEncoding::CopyEncoding(
rtc::ArrayView<const uint8_t> payload)
: payload_(payload) {
info_.encoded_bytes = payload_.size();
}
AudioEncoder::EncodedInfo MockAudioEncoderDeprecated::CopyEncoding::operator()(
uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_bytes_encoded,
uint8_t* encoded) {
RTC_CHECK(encoded);
RTC_CHECK_LE(info_.encoded_bytes, payload_.size());
std::memcpy(encoded, payload_.data(), info_.encoded_bytes);
return info_;
}
} // namespace webrtc

View File

@ -11,15 +11,16 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_MOCK_MOCK_AUDIO_ENCODER_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_MOCK_MOCK_AUDIO_ENCODER_H_
#include "webrtc/base/array_view.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace webrtc {
class MockAudioEncoder final : public AudioEncoder {
class MockAudioEncoderBase : public AudioEncoder {
public:
~MockAudioEncoder() override { Die(); }
~MockAudioEncoderBase() override { Die(); }
MOCK_METHOD0(Die, void());
MOCK_METHOD1(Mark, void(std::string desc));
MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
@ -29,12 +30,6 @@ class MockAudioEncoder final : public AudioEncoder {
MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
MOCK_CONST_METHOD0(GetTargetBitrate, int());
// Note, we explicitly chose not to create a mock for the Encode method.
MOCK_METHOD4(EncodeInternal,
EncodedInfo(uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded));
MOCK_METHOD0(Reset, void());
MOCK_METHOD1(SetFec, bool(bool enable));
MOCK_METHOD1(SetDtx, bool(bool enable));
@ -46,6 +41,90 @@ class MockAudioEncoder final : public AudioEncoder {
MOCK_METHOD1(SetMaxPayloadSize, void(int max_payload_size_bytes));
};
class MockAudioEncoder final : public MockAudioEncoderBase {
public:
using AudioEncoder::EncodeInternal;
// Note, we explicitly chose not to create a mock for the Encode method.
MOCK_METHOD3(EncodeInternal,
EncodedInfo(uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded));
class FakeEncoding {
public:
// Creates a functor that will return |info| and adjust the rtc::Buffer
// given as input to it, so it is info.encoded_bytes larger.
FakeEncoding(const AudioEncoder::EncodedInfo& info);
// Shorthand version of the constructor above, for when only setting
// encoded_bytes in the EncodedInfo object matters.
FakeEncoding(size_t encoded_bytes);
AudioEncoder::EncodedInfo operator()(uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded);
private:
AudioEncoder::EncodedInfo info_;
};
class CopyEncoding {
public:
// Creates a functor that will return |info| and append the data in the
// payload to the buffer given as input to it. Up to info.encoded_bytes are
// appended - make sure the payload is big enough! Since it uses an
// ArrayView, it _does not_ copy the payload. Make sure it doesn't fall out
// of scope!
CopyEncoding(AudioEncoder::EncodedInfo info,
rtc::ArrayView<const uint8_t> payload);
// Shorthand version of the constructor above, for when you wish to append
// the whole payload and do not care about any EncodedInfo attribute other
// than encoded_bytes.
CopyEncoding(rtc::ArrayView<const uint8_t> payload);
AudioEncoder::EncodedInfo operator()(uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded);
private:
AudioEncoder::EncodedInfo info_;
rtc::ArrayView<const uint8_t> payload_;
};
};
class MockAudioEncoderDeprecated final : public MockAudioEncoderBase {
public:
using AudioEncoder::EncodeInternal;
// Note, we explicitly chose not to create a mock for the Encode method.
MOCK_METHOD4(EncodeInternal,
EncodedInfo(uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded));
// A functor like MockAudioEncoder::CopyEncoding above, but which has the
// deprecated Encode signature. Currently only used in one test and should be
// removed once that backwards compatibility is.
class CopyEncoding {
public:
CopyEncoding(AudioEncoder::EncodedInfo info,
rtc::ArrayView<const uint8_t> payload);
CopyEncoding(rtc::ArrayView<const uint8_t> payload);
AudioEncoder::EncodedInfo operator()(uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_bytes_encoded,
uint8_t* encoded);
private:
AudioEncoder::EncodedInfo info_;
rtc::ArrayView<const uint8_t> payload_;
};
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_MOCK_MOCK_AUDIO_ENCODER_H_

View File

@ -130,36 +130,6 @@ int AudioEncoderOpus::GetTargetBitrate() const {
return config_.bitrate_bps;
}
AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (input_buffer_.empty())
first_timestamp_in_buffer_ = rtp_timestamp;
RTC_DCHECK_EQ(SamplesPer10msFrame(), audio.size());
input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
if (input_buffer_.size() <
(Num10msFramesPerPacket() * SamplesPer10msFrame())) {
return EncodedInfo();
}
RTC_CHECK_EQ(input_buffer_.size(),
Num10msFramesPerPacket() * SamplesPer10msFrame());
int status = WebRtcOpus_Encode(
inst_, &input_buffer_[0],
rtc::CheckedDivExact(input_buffer_.size(), config_.num_channels),
rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded);
RTC_CHECK_GE(status, 0); // Fails only if fed invalid data.
input_buffer_.clear();
EncodedInfo info;
info.encoded_bytes = static_cast<size_t>(status);
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = config_.payload_type;
info.send_even_if_empty = true; // Allows Opus to send empty packets.
info.speech = (status > 0);
return info;
}
void AudioEncoderOpus::Reset() {
RTC_CHECK(RecreateEncoderInstance(config_));
}
@ -212,6 +182,47 @@ void AudioEncoderOpus::SetTargetBitrate(int bits_per_second) {
RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config_.bitrate_bps));
}
AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) {
if (input_buffer_.empty())
first_timestamp_in_buffer_ = rtp_timestamp;
input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
if (input_buffer_.size() <
(Num10msFramesPerPacket() * SamplesPer10msFrame())) {
return EncodedInfo();
}
RTC_CHECK_EQ(input_buffer_.size(),
Num10msFramesPerPacket() * SamplesPer10msFrame());
const size_t max_encoded_bytes = MaxEncodedBytes();
EncodedInfo info;
info.encoded_bytes =
encoded->AppendData(
max_encoded_bytes, [&] (rtc::ArrayView<uint8_t> encoded) {
int status = WebRtcOpus_Encode(
inst_, &input_buffer_[0],
rtc::CheckedDivExact(input_buffer_.size(),
config_.num_channels),
rtc::saturated_cast<int16_t>(max_encoded_bytes),
encoded.data());
RTC_CHECK_GE(status, 0); // Fails only if fed invalid data.
return static_cast<size_t>(status);
});
input_buffer_.clear();
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = config_.payload_type;
info.send_even_if_empty = true; // Allows Opus to send empty packets.
info.speech = (info.encoded_bytes > 0);
return info;
}
size_t AudioEncoderOpus::Num10msFramesPerPacket() const {
return static_cast<size_t>(rtc::CheckedDivExact(config_.frame_size_ms, 10));
}

View File

@ -23,6 +23,8 @@ struct CodecInst;
class AudioEncoderOpus final : public AudioEncoder {
public:
using AudioEncoder::EncodeInternal;
enum ApplicationMode {
kVoip = 0,
kAudio = 1,
@ -61,11 +63,6 @@ class AudioEncoderOpus final : public AudioEncoder {
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
bool SetFec(bool enable) override;
@ -84,6 +81,11 @@ class AudioEncoderOpus final : public AudioEncoder {
ApplicationMode application() const { return config_.application; }
bool dtx_enabled() const { return config_.dtx_enabled; }
protected:
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) override;
private:
size_t Num10msFramesPerPacket() const;
size_t SamplesPer10msFrame() const;

View File

@ -19,6 +19,8 @@ struct CodecInst;
class AudioEncoderPcm16B final : public AudioEncoderPcm {
public:
using AudioEncoder::EncodeInternal;
struct Config : public AudioEncoderPcm::Config {
public:
Config() : AudioEncoderPcm::Config(107), sample_rate_hz(8000) {}

View File

@ -55,13 +55,14 @@ int AudioEncoderCopyRed::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
rtc::Buffer* encoded) {
const size_t primary_offset = encoded->size();
EncodedInfo info =
speech_encoder_->Encode(rtp_timestamp, audio, max_encoded_bytes, encoded);
RTC_CHECK_GE(max_encoded_bytes,
info.encoded_bytes + secondary_info_.encoded_bytes);
speech_encoder_->Encode(rtp_timestamp, audio, encoded);
RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
RTC_DCHECK_EQ(encoded->size() - primary_offset, info.encoded_bytes);
if (info.encoded_bytes > 0) {
// |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
@ -70,13 +71,13 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
info.redundant.push_back(info);
RTC_DCHECK_EQ(info.redundant.size(), 1u);
if (secondary_info_.encoded_bytes > 0) {
memcpy(&encoded[info.encoded_bytes], secondary_encoded_.data(),
secondary_info_.encoded_bytes);
encoded->AppendData(secondary_encoded_);
info.redundant.push_back(secondary_info_);
RTC_DCHECK_EQ(info.redundant.size(), 2u);
}
// Save primary to secondary.
secondary_encoded_.SetData(encoded, info.encoded_bytes);
secondary_encoded_.SetData(encoded->data() + primary_offset,
info.encoded_bytes);
secondary_info_ = info;
RTC_DCHECK_EQ(info.speech, info.redundant[0].speech);
}

View File

@ -24,6 +24,8 @@ namespace webrtc {
// into one packet.
class AudioEncoderCopyRed final : public AudioEncoder {
public:
using AudioEncoder::EncodeInternal;
struct Config {
public:
int payload_type;
@ -42,10 +44,6 @@ class AudioEncoderCopyRed final : public AudioEncoder {
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
bool SetFec(bool enable) override;
bool SetDtx(bool enable) override;
@ -54,6 +52,11 @@ class AudioEncoderCopyRed final : public AudioEncoder {
void SetProjectedPacketLossRate(double fraction) override;
void SetTargetBitrate(int target_bps) override;
protected:
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
rtc::ArrayView<const int16_t> audio,
rtc::Buffer* encoded) override;
private:
AudioEncoder* speech_encoder_;
int red_payload_type_;

View File

@ -47,7 +47,6 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
.WillRepeatedly(Return(sample_rate_hz_));
EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
.WillRepeatedly(Return(kMockMaxEncodedBytes));
encoded_.resize(red_->MaxEncodedBytes(), 0);
}
void TearDown() override {
@ -60,10 +59,11 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
void Encode() {
ASSERT_TRUE(red_.get() != NULL);
encoded_.Clear();
encoded_info_ = red_->Encode(
timestamp_,
rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms),
encoded_.size(), &encoded_[0]);
&encoded_);
timestamp_ += num_audio_samples_10ms;
}
@ -73,34 +73,11 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
int16_t audio_[kMaxNumSamples];
const int sample_rate_hz_;
size_t num_audio_samples_10ms;
std::vector<uint8_t> encoded_;
rtc::Buffer encoded_;
AudioEncoder::EncodedInfo encoded_info_;
const int red_payload_type_;
};
class MockEncodeHelper {
public:
MockEncodeHelper() : write_payload_(false), payload_(NULL) {
memset(&info_, 0, sizeof(info_));
}
AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (write_payload_) {
RTC_CHECK(encoded);
RTC_CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
memcpy(encoded, payload_, info_.encoded_bytes);
}
return info_;
}
AudioEncoder::EncodedInfo info_;
bool write_payload_;
uint8_t* payload_;
};
TEST_F(AudioEncoderCopyRedTest, CreateAndDestroy) {
}
@ -143,7 +120,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
InSequence s;
MockFunction<void(int check_point_id)> check;
for (int i = 1; i <= 6; ++i) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillRepeatedly(Return(AudioEncoder::EncodedInfo()));
EXPECT_CALL(check, Call(i));
Encode();
@ -154,12 +131,16 @@ TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
// Checks that no output is produced if the underlying codec doesn't emit any
// new data, even if the RED codec is loaded with a secondary encoding.
TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
// Start with one Encode() call that will produce output.
static const size_t kEncodedSize = 17;
AudioEncoder::EncodedInfo info;
info.encoded_bytes = kEncodedSize;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
{
InSequence s;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(kEncodedSize)))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(0)))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(kEncodedSize)));
}
// Start with one Encode() call that will produce output.
Encode();
// First call is a special case, since it does not include a secondary
// payload.
@ -167,16 +148,10 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
EXPECT_EQ(kEncodedSize, encoded_info_.encoded_bytes);
// Next call to the speech encoder will not produce any output.
info.encoded_bytes = 0;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
// Final call to the speech encoder will produce output.
info.encoded_bytes = kEncodedSize;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
Encode();
EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
ASSERT_EQ(2u, encoded_info_.redundant.size());
@ -190,10 +165,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
static const int kNumPackets = 10;
InSequence s;
for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
AudioEncoder::EncodedInfo info;
info.encoded_bytes = encode_size;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size)));
}
// First call is a special case, since it does not include a secondary
@ -213,13 +186,13 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
// Checks that the correct timestamps are returned.
TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
MockEncodeHelper helper;
helper.info_.encoded_bytes = 17;
helper.info_.encoded_timestamp = timestamp_;
uint32_t primary_timestamp = timestamp_;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
AudioEncoder::EncodedInfo info;
info.encoded_bytes = 17;
info.encoded_timestamp = timestamp_;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
// First call is a special case, since it does not include a secondary
// payload.
@ -228,7 +201,10 @@ TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
uint32_t secondary_timestamp = primary_timestamp;
primary_timestamp = timestamp_;
helper.info_.encoded_timestamp = timestamp_;
info.encoded_timestamp = timestamp_;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
Encode();
ASSERT_EQ(2u, encoded_info_.redundant.size());
EXPECT_EQ(primary_timestamp, encoded_info_.redundant[0].encoded_timestamp);
@ -240,30 +216,26 @@ TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
// Let the mock encoder write payloads with increasing values. The first
// payload will have values 0, 1, 2, ..., kPayloadLenBytes - 1.
MockEncodeHelper helper;
static const size_t kPayloadLenBytes = 5;
helper.info_.encoded_bytes = kPayloadLenBytes;
helper.write_payload_ = true;
uint8_t payload[kPayloadLenBytes];
for (uint8_t i = 0; i < kPayloadLenBytes; ++i) {
payload[i] = i;
}
helper.payload_ = payload;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillRepeatedly(Invoke(MockAudioEncoder::CopyEncoding(payload)));
// First call is a special case, since it does not include a secondary
// payload.
Encode();
EXPECT_EQ(kPayloadLenBytes, encoded_info_.encoded_bytes);
for (size_t i = 0; i < kPayloadLenBytes; ++i) {
EXPECT_EQ(i, encoded_[i]);
EXPECT_EQ(i, encoded_.data()[i]);
}
for (int j = 0; j < 5; ++j) {
// Increment all values of the payload by 10.
for (size_t i = 0; i < kPayloadLenBytes; ++i)
helper.payload_[i] += 10;
payload[i] += 10;
Encode();
ASSERT_EQ(2u, encoded_info_.redundant.size());
@ -271,9 +243,9 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
EXPECT_EQ(kPayloadLenBytes, encoded_info_.redundant[1].encoded_bytes);
for (size_t i = 0; i < kPayloadLenBytes; ++i) {
// Check primary payload.
EXPECT_EQ((j + 1) * 10 + i, encoded_[i]);
EXPECT_EQ((j + 1) * 10 + i, encoded_.data()[i]);
// Check secondary payload.
EXPECT_EQ(j * 10 + i, encoded_[i + kPayloadLenBytes]);
EXPECT_EQ(j * 10 + i, encoded_.data()[i + kPayloadLenBytes]);
}
}
}
@ -281,13 +253,12 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
// Checks correct propagation of payload type.
// Checks that the correct timestamps are returned.
TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
MockEncodeHelper helper;
helper.info_.encoded_bytes = 17;
const int primary_payload_type = red_payload_type_ + 1;
helper.info_.payload_type = primary_payload_type;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
AudioEncoder::EncodedInfo info;
info.encoded_bytes = 17;
info.payload_type = primary_payload_type;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
// First call is a special case, since it does not include a secondary
// payload.
@ -297,7 +268,10 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
EXPECT_EQ(red_payload_type_, encoded_info_.payload_type);
const int secondary_payload_type = red_payload_type_ + 2;
helper.info_.payload_type = secondary_payload_type;
info.payload_type = secondary_payload_type;
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _))
.WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
Encode();
ASSERT_EQ(2u, encoded_info_.redundant.size());
EXPECT_EQ(secondary_payload_type, encoded_info_.redundant[0].payload_type);

View File

@ -100,10 +100,8 @@ class AudioDecoderTest : public ::testing::Test {
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
32000),
codec_input_rate_hz_(32000), // Legacy default value.
encoded_(NULL),
frame_size_(0),
data_length_(0),
encoded_bytes_(0),
channels_(1),
payload_type_(17),
decoder_(NULL) {}
@ -115,8 +113,6 @@ class AudioDecoderTest : public ::testing::Test {
codec_input_rate_hz_ = audio_encoder_->SampleRateHz();
// Create arrays.
ASSERT_GT(data_length_, 0u) << "The test must set data_length_ > 0";
// Longest encoded data is produced by PCM16b with 2 bytes per sample.
encoded_ = new uint8_t[data_length_ * 2];
// Logging to view input and output in Matlab.
// Use 'gyp -Denable_data_logging=1' to enable logging.
DataLog::CreateLog();
@ -128,9 +124,6 @@ class AudioDecoderTest : public ::testing::Test {
virtual void TearDown() {
delete decoder_;
decoder_ = NULL;
// Delete arrays.
delete [] encoded_;
encoded_ = NULL;
// Close log.
DataLog::ReturnLog();
}
@ -141,15 +134,15 @@ class AudioDecoderTest : public ::testing::Test {
// implementations are gone.
virtual int EncodeFrame(const int16_t* input,
size_t input_len_samples,
uint8_t* output) {
encoded_info_.encoded_bytes = 0;
rtc::Buffer* output) {
AudioEncoder::EncodedInfo encoded_info;
const size_t samples_per_10ms = audio_encoder_->SampleRateHz() / 100;
RTC_CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
input_len_samples);
std::unique_ptr<int16_t[]> interleaved_input(
new int16_t[channels_ * samples_per_10ms]);
for (size_t i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
EXPECT_EQ(0u, encoded_info.encoded_bytes);
// Duplicate the mono input signal to however many channels the test
// wants.
@ -157,15 +150,15 @@ class AudioDecoderTest : public ::testing::Test {
samples_per_10ms, channels_,
interleaved_input.get());
encoded_info_ = audio_encoder_->Encode(
encoded_info = audio_encoder_->Encode(
0, rtc::ArrayView<const int16_t>(interleaved_input.get(),
audio_encoder_->NumChannels() *
audio_encoder_->SampleRateHz() /
100),
data_length_ * 2, output);
output);
}
EXPECT_EQ(payload_type_, encoded_info_.payload_type);
return static_cast<int>(encoded_info_.encoded_bytes);
EXPECT_EQ(payload_type_, encoded_info.payload_type);
return static_cast<int>(encoded_info.encoded_bytes);
}
// Encodes and decodes audio. The absolute difference between the input and
@ -179,7 +172,8 @@ class AudioDecoderTest : public ::testing::Test {
ASSERT_GE(channel_diff_tolerance, 0) <<
"Test must define a channel_diff_tolerance >= 0";
size_t processed_samples = 0u;
encoded_bytes_ = 0u;
rtc::Buffer encoded;
size_t encoded_bytes = 0u;
InitEncoder();
std::vector<int16_t> input;
std::vector<int16_t> decoded;
@ -191,23 +185,23 @@ class AudioDecoderTest : public ::testing::Test {
ASSERT_TRUE(input_audio_.Read(
frame_size_, codec_input_rate_hz_, &input[processed_samples]));
size_t enc_len = EncodeFrame(
&input[processed_samples], frame_size_, &encoded_[encoded_bytes_]);
&input[processed_samples], frame_size_, &encoded);
// Make sure that frame_size_ * channels_ samples are allocated and free.
decoded.resize((processed_samples + frame_size_) * channels_, 0);
AudioDecoder::SpeechType speech_type;
size_t dec_len = decoder_->Decode(
&encoded_[encoded_bytes_], enc_len, codec_input_rate_hz_,
&encoded.data()[encoded_bytes], enc_len, codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
&decoded[processed_samples * channels_], &speech_type);
EXPECT_EQ(frame_size_ * channels_, dec_len);
encoded_bytes_ += enc_len;
encoded_bytes += enc_len;
processed_samples += frame_size_;
}
// For some codecs it doesn't make sense to check expected number of bytes,
// since the number can vary for different platforms. Opus and iSAC are
// such codecs. In this case expected_bytes is set to 0.
if (expected_bytes) {
EXPECT_EQ(expected_bytes, encoded_bytes_);
EXPECT_EQ(expected_bytes, encoded_bytes);
}
CompareInputOutput(
input, decoded, processed_samples, channels_, tolerance, delay);
@ -226,12 +220,13 @@ class AudioDecoderTest : public ::testing::Test {
std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
ASSERT_TRUE(
input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
size_t enc_len = EncodeFrame(input.get(), frame_size_, encoded_);
rtc::Buffer encoded;
size_t enc_len = EncodeFrame(input.get(), frame_size_, &encoded);
size_t dec_len;
AudioDecoder::SpeechType speech_type1, speech_type2;
decoder_->Reset();
std::unique_ptr<int16_t[]> output1(new int16_t[frame_size_ * channels_]);
dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
dec_len = decoder_->Decode(encoded.data(), enc_len, codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
output1.get(), &speech_type1);
ASSERT_LE(dec_len, frame_size_ * channels_);
@ -239,7 +234,7 @@ class AudioDecoderTest : public ::testing::Test {
// Re-init decoder and decode again.
decoder_->Reset();
std::unique_ptr<int16_t[]> output2(new int16_t[frame_size_ * channels_]);
dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
dec_len = decoder_->Decode(encoded.data(), enc_len, codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
output2.get(), &speech_type2);
ASSERT_LE(dec_len, frame_size_ * channels_);
@ -256,11 +251,13 @@ class AudioDecoderTest : public ::testing::Test {
std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
ASSERT_TRUE(
input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
size_t enc_len = EncodeFrame(input.get(), frame_size_, encoded_);
rtc::Buffer encoded;
size_t enc_len = EncodeFrame(input.get(), frame_size_, &encoded);
AudioDecoder::SpeechType speech_type;
decoder_->Reset();
std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
size_t dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
output.get(), &speech_type);
EXPECT_EQ(frame_size_ * channels_, dec_len);
@ -273,13 +270,10 @@ class AudioDecoderTest : public ::testing::Test {
test::ResampleInputAudioFile input_audio_;
int codec_input_rate_hz_;
uint8_t* encoded_;
size_t frame_size_;
size_t data_length_;
size_t encoded_bytes_;
size_t channels_;
const int payload_type_;
AudioEncoder::EncodedInfo encoded_info_;
AudioDecoder* decoder_;
std::unique_ptr<AudioEncoder> audio_encoder_;
};
@ -348,11 +342,13 @@ class AudioDecoderIlbcTest : public AudioDecoderTest {
std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
ASSERT_TRUE(
input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
size_t enc_len = EncodeFrame(input.get(), frame_size_, encoded_);
rtc::Buffer encoded;
size_t enc_len = EncodeFrame(input.get(), frame_size_, &encoded);
AudioDecoder::SpeechType speech_type;
decoder_->Reset();
std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
size_t dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
output.get(), &speech_type);
EXPECT_EQ(frame_size_, dec_len);

View File

@ -60,8 +60,7 @@ class NetEqIlbcQualityTest : public NetEqQualityTest {
int EncodeBlock(int16_t* in_data,
size_t block_size_samples,
uint8_t* payload,
size_t max_bytes) override {
rtc::Buffer* payload, size_t max_bytes) override {
const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
@ -70,7 +69,7 @@ class NetEqIlbcQualityTest : public NetEqQualityTest {
info = encoder_->Encode(dummy_timestamp,
rtc::ArrayView<const int16_t>(
in_data + encoded_samples, kFrameSizeSamples),
max_bytes, payload);
payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);

View File

@ -44,7 +44,7 @@ class NetEqIsacQualityTest : public NetEqQualityTest {
void SetUp() override;
void TearDown() override;
virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
uint8_t* payload, size_t max_bytes);
rtc::Buffer* payload, size_t max_bytes);
private:
ISACFIX_MainStruct* isac_encoder_;
int bit_rate_kbps_;
@ -78,7 +78,7 @@ void NetEqIsacQualityTest::TearDown() {
int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
size_t block_size_samples,
uint8_t* payload, size_t max_bytes) {
rtc::Buffer* payload, size_t max_bytes) {
// ISAC takes 10 ms for every call.
const int subblocks = kIsacBlockDurationMs / 10;
const int subblock_length = 10 * kIsacInputSamplingKhz;
@ -89,7 +89,11 @@ int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
// The Isac encoder does not perform encoding (and returns 0) until it
// receives a sequence of sub-blocks that amount to the frame duration.
EXPECT_EQ(0, value);
value = WebRtcIsacfix_Encode(isac_encoder_, &in_data[pointer], payload);
payload->AppendData(max_bytes, [&] (rtc::ArrayView<uint8_t> payload) {
value = WebRtcIsacfix_Encode(isac_encoder_, &in_data[pointer],
payload.data());
return (value >= 0) ? static_cast<size_t>(value) : 0;
});
}
EXPECT_GT(value, 0);
return value;

View File

@ -104,7 +104,7 @@ class NetEqOpusQualityTest : public NetEqQualityTest {
void SetUp() override;
void TearDown() override;
virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
uint8_t* payload, size_t max_bytes);
rtc::Buffer* payload, size_t max_bytes);
private:
WebRtcOpusEncInst* opus_encoder_;
OpusRepacketizer* repacketizer_;
@ -175,25 +175,33 @@ void NetEqOpusQualityTest::TearDown() {
int NetEqOpusQualityTest::EncodeBlock(int16_t* in_data,
size_t block_size_samples,
uint8_t* payload, size_t max_bytes) {
rtc::Buffer* payload, size_t max_bytes) {
EXPECT_EQ(block_size_samples, sub_block_size_samples_ * sub_packets_);
int16_t* pointer = in_data;
int value;
opus_repacketizer_init(repacketizer_);
for (int idx = 0; idx < sub_packets_; idx++) {
value = WebRtcOpus_Encode(opus_encoder_, pointer, sub_block_size_samples_,
max_bytes, payload);
Log() << "Encoded a frame with Opus mode "
<< (value == 0 ? 0 : payload[0] >> 3)
<< std::endl;
if (OPUS_OK != opus_repacketizer_cat(repacketizer_, payload, value)) {
payload->AppendData(max_bytes, [&] (rtc::ArrayView<uint8_t> payload) {
value = WebRtcOpus_Encode(opus_encoder_,
pointer, sub_block_size_samples_,
max_bytes, payload.data());
Log() << "Encoded a frame with Opus mode "
<< (value == 0 ? 0 : payload[0] >> 3)
<< std::endl;
return (value >= 0) ? static_cast<size_t>(value) : 0;
});
if (OPUS_OK != opus_repacketizer_cat(repacketizer_,
payload->data(), value)) {
opus_repacketizer_init(repacketizer_);
// If the repacketization fails, we discard this frame.
return 0;
}
pointer += sub_block_size_samples_ * channels_;
}
value = opus_repacketizer_out(repacketizer_, payload,
value = opus_repacketizer_out(repacketizer_, payload->data(),
static_cast<opus_int32>(max_bytes));
EXPECT_GE(value, 0);
return value;

View File

@ -60,8 +60,7 @@ class NetEqPcmuQualityTest : public NetEqQualityTest {
int EncodeBlock(int16_t* in_data,
size_t block_size_samples,
uint8_t* payload,
size_t max_bytes) override {
rtc::Buffer* payload, size_t max_bytes) override {
const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
@ -70,7 +69,7 @@ class NetEqPcmuQualityTest : public NetEqQualityTest {
info = encoder_->Encode(dummy_timestamp,
rtc::ArrayView<const int16_t>(
in_data + encoded_samples, kFrameSizeSamples),
max_bytes, payload);
payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);

View File

@@ -249,7 +249,6 @@ NetEqQualityTest::NetEqQualityTest(int block_duration_ms,
neteq_.reset(NetEq::Create(config));
max_payload_bytes_ = in_size_samples_ * channels_ * sizeof(int16_t);
in_data_.reset(new int16_t[in_size_samples_ * channels_]);
payload_.reset(new uint8_t[max_payload_bytes_]);
out_data_.reset(new int16_t[out_size_samples_ * channels_]);
}
@@ -380,7 +379,7 @@ int NetEqQualityTest::Transmit() {
if (!PacketLost()) {
int ret = neteq_->InsertPacket(
rtp_header_,
rtc::ArrayView<const uint8_t>(payload_.get(), payload_size_bytes_),
rtc::ArrayView<const uint8_t>(payload_.data(), payload_size_bytes_),
packet_input_time_ms * in_sampling_khz_);
if (ret != NetEq::kOK)
return -1;
@@ -416,8 +415,9 @@ void NetEqQualityTest::Simulate() {
// Assume 10 packets in packets buffer.
while (decodable_time_ms_ - 10 * block_duration_ms_ < decoded_time_ms_) {
ASSERT_TRUE(in_file_->Read(in_size_samples_ * channels_, &in_data_[0]));
payload_.Clear();
payload_size_bytes_ = EncodeBlock(&in_data_[0],
in_size_samples_, &payload_[0],
in_size_samples_, &payload_,
max_payload_bytes_);
total_payload_size_bytes_ += payload_size_bytes_;
decodable_time_ms_ = Transmit() + block_duration_ms_;

View File

@@ -77,7 +77,7 @@ class NetEqQualityTest : public ::testing::Test {
// 2. save the bit stream to |payload| of |max_bytes| bytes in size,
// 3. returns the length of the payload (in bytes),
virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
uint8_t* payload, size_t max_bytes) = 0;
rtc::Buffer* payload, size_t max_bytes) = 0;
// PacketLost(...) determines weather a packet sent at an indicated time gets
// lost or not.
@@ -128,7 +128,7 @@ class NetEqQualityTest : public ::testing::Test {
std::unique_ptr<LossModel> loss_model_;
std::unique_ptr<int16_t[]> in_data_;
std::unique_ptr<uint8_t[]> payload_;
rtc::Buffer payload_;
std::unique_ptr<int16_t[]> out_data_;
WebRtcRTPHeader rtp_header_;

View File

@@ -167,6 +167,7 @@
'<(webrtc_root)/tools/tools.gyp:agc_test_utils',
],
'sources': [
'audio_coding/codecs/audio_encoder_unittest.cc',
'audio_coding/codecs/cng/audio_encoder_cng_unittest.cc',
'audio_coding/acm2/acm_receiver_unittest_oldapi.cc',
'audio_coding/acm2/audio_coding_module_unittest_oldapi.cc',
@@ -185,6 +186,7 @@
'audio_coding/codecs/opus/audio_encoder_opus_unittest.cc',
'audio_coding/codecs/opus/opus_unittest.cc',
'audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc',
'audio_coding/codecs/mock/mock_audio_encoder.cc',
'audio_coding/neteq/audio_classifier_unittest.cc',
'audio_coding/neteq/audio_multi_vector_unittest.cc',
'audio_coding/neteq/audio_vector_unittest.cc',