diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
index 1990768bc7..5649f07b2b 100644
--- a/webrtc/modules/audio_coding/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -137,8 +137,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   // Accessing members, take the lock.
   rtc::CritScope lock(&crit_sect_);

-  enum NetEqOutputType type;
-  if (neteq_->GetAudio(audio_frame, &type) != NetEq::kOK) {
+  if (neteq_->GetAudio(audio_frame) != NetEq::kOK) {
     LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
     return -1;
   }
diff --git a/webrtc/modules/audio_coding/neteq/include/neteq.h b/webrtc/modules/audio_coding/neteq/include/neteq.h
index dff09db3db..d53551f897 100644
--- a/webrtc/modules/audio_coding/neteq/include/neteq.h
+++ b/webrtc/modules/audio_coding/neteq/include/neteq.h
@@ -54,14 +54,6 @@ struct NetEqNetworkStatistics {
   int max_waiting_time_ms;
 };

-enum NetEqOutputType {
-  kOutputNormal,
-  kOutputPLC,
-  kOutputCNG,
-  kOutputPLCtoCNG,
-  kOutputVADPassive
-};
-
 enum NetEqPlayoutMode {
   kPlayoutOn,
   kPlayoutOff,
@@ -165,11 +157,11 @@ class NetEq {

   // Instructs NetEq to deliver 10 ms of audio data. The data is written to
   // |audio_frame|. All data in |audio_frame| is wiped; |data_|, |interleaved_|,
-  // |num_channels_|, and |samples_per_channel_| are updated upon success. If
-  // an error is returned, some fields may not have been updated.
-  // The speech type is written to |type|, if |type| is not NULL.
+  // |num_channels_|, |samples_per_channel_|, |speech_type_|, and
+  // |vad_activity_| are updated upon success. If an error is returned, some
+  // fields may not have been updated.
   // Returns kOK on success, or kFail in case of an error.
-  virtual int GetAudio(AudioFrame* audio_frame, NetEqOutputType* type) = 0;
+  virtual int GetAudio(AudioFrame* audio_frame) = 0;

   // Associates |rtp_payload_type| with |codec| and |codec_name|, and stores the
   // information in the codec database. Returns 0 on success, -1 on failure.
diff --git a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 7712b244bd..50c24a3b73 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -188,16 +188,14 @@ class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
   }

   void GetAndVerifyOutput() override {
-    NetEqOutputType output_type;
     // Get audio from internal decoder instance.
-    EXPECT_EQ(NetEq::kOK,
-              neteq_internal_->GetAudio(&output_internal_, &output_type));
+    EXPECT_EQ(NetEq::kOK, neteq_internal_->GetAudio(&output_internal_));
     EXPECT_EQ(1u, output_internal_.num_channels_);
     EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
               output_internal_.samples_per_channel_);

     // Get audio from external decoder instance.
-    GetOutputAudio(&output_, &output_type);
+    GetOutputAudio(&output_);

     for (size_t i = 0; i < output_.samples_per_channel_; ++i) {
       ASSERT_EQ(output_.data_[i], output_internal_.data_[i])
@@ -251,30 +249,30 @@ class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,
         .WillRepeatedly(Return(false));
   }

-  virtual void UpdateState(NetEqOutputType output_type) {
+  virtual void UpdateState(AudioFrame::SpeechType output_type) {
     switch (test_state_) {
       case kInitialPhase: {
-        if (output_type == kOutputNormal) {
+        if (output_type == AudioFrame::kNormalSpeech) {
           test_state_ = kNormalPhase;
         }
         break;
       }
       case kNormalPhase: {
-        if (output_type == kOutputPLC) {
+        if (output_type == AudioFrame::kPLC) {
           test_state_ = kExpandPhase;
         }
         break;
       }
       case kExpandPhase: {
-        if (output_type == kOutputPLCtoCNG) {
+        if (output_type == AudioFrame::kPLCCNG) {
           test_state_ = kFadedExpandPhase;
-        } else if (output_type == kOutputNormal) {
+        } else if (output_type == AudioFrame::kNormalSpeech) {
           test_state_ = kRecovered;
         }
         break;
       }
       case kFadedExpandPhase: {
-        if (output_type == kOutputNormal) {
+        if (output_type == AudioFrame::kNormalSpeech) {
           test_state_ = kRecovered;
         }
         break;
@@ -287,9 +285,8 @@ class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,

   void GetAndVerifyOutput() override {
     AudioFrame output;
-    NetEqOutputType output_type;
-    GetOutputAudio(&output, &output_type);
-    UpdateState(output_type);
+    GetOutputAudio(&output);
+    UpdateState(output.speech_type_);

     if (test_state_ == kExpandPhase || test_state_ == kFadedExpandPhase) {
       // Don't verify the output in this phase of the test.
@@ -369,22 +366,22 @@ TEST_F(LargeTimestampJumpTest, JumpLongerThanHalfRangeAndWrap) {

 class ShortTimestampJumpTest : public LargeTimestampJumpTest {
  protected:
-  void UpdateState(NetEqOutputType output_type) override {
+  void UpdateState(AudioFrame::SpeechType output_type) override {
     switch (test_state_) {
       case kInitialPhase: {
-        if (output_type == kOutputNormal) {
+        if (output_type == AudioFrame::kNormalSpeech) {
           test_state_ = kNormalPhase;
         }
         break;
       }
       case kNormalPhase: {
-        if (output_type == kOutputPLC) {
+        if (output_type == AudioFrame::kPLC) {
           test_state_ = kExpandPhase;
         }
         break;
       }
       case kExpandPhase: {
-        if (output_type == kOutputNormal) {
+        if (output_type == AudioFrame::kNormalSpeech) {
           test_state_ = kRecovered;
         }
         break;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index fc74f2de8b..b4cc915951 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -150,33 +150,33 @@ int NetEqImpl::InsertSyncPacket(const WebRtcRTPHeader& rtp_header,

 namespace {
 void SetAudioFrameActivityAndType(bool vad_enabled,
-                                  NetEqOutputType type,
+                                  NetEqImpl::OutputType type,
                                   AudioFrame::VADActivity last_vad_activity,
                                   AudioFrame* audio_frame) {
   switch (type) {
-    case kOutputNormal: {
+    case NetEqImpl::OutputType::kNormalSpeech: {
       audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
       audio_frame->vad_activity_ = AudioFrame::kVadActive;
       break;
     }
-    case kOutputVADPassive: {
+    case NetEqImpl::OutputType::kVadPassive: {
       // This should only be reached if the VAD is enabled.
       RTC_DCHECK(vad_enabled);
       audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
       audio_frame->vad_activity_ = AudioFrame::kVadPassive;
       break;
     }
-    case kOutputCNG: {
+    case NetEqImpl::OutputType::kCNG: {
       audio_frame->speech_type_ = AudioFrame::kCNG;
       audio_frame->vad_activity_ = AudioFrame::kVadPassive;
       break;
     }
-    case kOutputPLC: {
+    case NetEqImpl::OutputType::kPLC: {
       audio_frame->speech_type_ = AudioFrame::kPLC;
       audio_frame->vad_activity_ = last_vad_activity;
       break;
     }
-    case kOutputPLCtoCNG: {
+    case NetEqImpl::OutputType::kPLCCNG: {
       audio_frame->speech_type_ = AudioFrame::kPLCCNG;
       audio_frame->vad_activity_ = AudioFrame::kVadPassive;
       break;
@@ -191,7 +191,7 @@ void SetAudioFrameActivityAndType(bool vad_enabled,
   }
 }

-int NetEqImpl::GetAudio(AudioFrame* audio_frame, NetEqOutputType* type) {
+int NetEqImpl::GetAudio(AudioFrame* audio_frame) {
   TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
   rtc::CritScope lock(&crit_sect_);
   int error = GetAudioInternal(audio_frame);
@@ -202,9 +202,6 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame, NetEqOutputType* type) {
     error_code_ = error;
     return kFail;
   }
-  if (type) {
-    *type = LastOutputType();
-  }
   SetAudioFrameActivityAndType(vad_->enabled(), LastOutputType(),
                                last_vad_activity_, audio_frame);
   last_vad_activity_ = audio_frame->vad_activity_;
@@ -2068,20 +2065,20 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
   decision_logic_->SetSampleRate(fs_hz_, output_size_samples_);
 }

-NetEqOutputType NetEqImpl::LastOutputType() {
+NetEqImpl::OutputType NetEqImpl::LastOutputType() {
   assert(vad_.get());
   assert(expand_.get());
   if (last_mode_ == kModeCodecInternalCng || last_mode_ == kModeRfc3389Cng) {
-    return kOutputCNG;
+    return OutputType::kCNG;
   } else if (last_mode_ == kModeExpand && expand_->MuteFactor(0) == 0) {
     // Expand mode has faded down to background noise only (very long expand).
-    return kOutputPLCtoCNG;
+    return OutputType::kPLCCNG;
   } else if (last_mode_ == kModeExpand) {
-    return kOutputPLC;
+    return OutputType::kPLC;
   } else if (vad_->running() && !vad_->active_speech()) {
-    return kOutputVADPassive;
+    return OutputType::kVadPassive;
   } else {
-    return kOutputNormal;
+    return OutputType::kNormalSpeech;
   }
 }
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.h b/webrtc/modules/audio_coding/neteq/neteq_impl.h
index 12cb6f45ae..514fdaad39 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -57,6 +57,14 @@ struct PreemptiveExpandFactory;

 class NetEqImpl : public webrtc::NetEq {
  public:
+  enum class OutputType {
+    kNormalSpeech,
+    kPLC,
+    kCNG,
+    kPLCCNG,
+    kVadPassive
+  };
+
   // Creates a new NetEqImpl object. The object will assume ownership of all
   // injected dependencies, and will delete them when done.
   NetEqImpl(const NetEq::Config& config,
@@ -96,7 +104,7 @@ class NetEqImpl : public webrtc::NetEq {
   int InsertSyncPacket(const WebRtcRTPHeader& rtp_header,
                        uint32_t receive_timestamp) override;

-  int GetAudio(AudioFrame* audio_frame, NetEqOutputType* type) override;
+  int GetAudio(AudioFrame* audio_frame) override;

   int RegisterPayloadType(NetEqDecoder codec,
                           const std::string& codec_name,
@@ -310,7 +318,7 @@ class NetEqImpl : public webrtc::NetEq {

   // Returns the output type for the audio produced by the latest call to
   // GetAudio().
-  NetEqOutputType LastOutputType() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+  OutputType LastOutputType() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);

   // Updates Expand and Merge.
   virtual void UpdatePlcComponents(int fs_hz, size_t channels)
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
index cb4405d16d..e1eb403022 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -466,11 +466,10 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
   // Pull audio once.
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   AudioFrame output;
-  NetEqOutputType type;
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

   // Start with a simple check that the fake decoder is behaving as expected.
   EXPECT_EQ(kPayloadLengthSamples,
@@ -542,11 +541,10 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
   // Pull audio once.
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   AudioFrame output;
-  NetEqOutputType type;
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

   // Insert two more packets. The first one is out of order, and is already too
   // old, the second one is the expected next packet.
@@ -571,10 +569,10 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
           Return(kPayloadLengthSamples)));

   // Pull audio once.
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

   // Now check the packet buffer, and make sure it is empty, since the
   // out-of-order packet should have been discarded.
@@ -611,12 +609,11 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
   // Pull audio once.
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   AudioFrame output;
-  NetEqOutputType type;
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputPLC, type);
+  EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);

   // Register the payload type.
   EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
@@ -633,11 +630,11 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {

   // Pull audio repeatedly and make sure we get normal output, that is not PLC.
   for (size_t i = 0; i < 3; ++i) {
-    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
     ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
     EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
     EXPECT_EQ(1u, output.num_channels_);
-    EXPECT_EQ(kOutputNormal, type)
+    EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_)
         << "NetEq did not decode the packets as expected.";
   }
 }
@@ -719,12 +716,11 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
   AudioFrame output;
   uint32_t timestamp;
   uint32_t last_timestamp;
-  NetEqOutputType type;
-  NetEqOutputType expected_type[8] = {
-      kOutputNormal, kOutputNormal,
-      kOutputCNG, kOutputCNG,
-      kOutputCNG, kOutputCNG,
-      kOutputNormal, kOutputNormal
+  AudioFrame::SpeechType expected_type[8] = {
+      AudioFrame::kNormalSpeech, AudioFrame::kNormalSpeech,
+      AudioFrame::kCNG, AudioFrame::kCNG,
+      AudioFrame::kCNG, AudioFrame::kCNG,
+      AudioFrame::kNormalSpeech, AudioFrame::kNormalSpeech
   };
   int expected_timestamp_increment[8] = {
       -1,  // will not be used.
@@ -734,15 +730,15 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
       50 * kSampleRateKhz, 10 * kSampleRateKhz
   };

-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&last_timestamp));

   for (size_t i = 1; i < 6; ++i) {
     ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
     EXPECT_EQ(1u, output.num_channels_);
-    EXPECT_EQ(expected_type[i - 1], type);
+    EXPECT_EQ(expected_type[i - 1], output.speech_type_);
     EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&timestamp));
-    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
     EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&timestamp));
     EXPECT_EQ(timestamp, last_timestamp + expected_timestamp_increment[i]);
     last_timestamp = timestamp;
@@ -758,8 +754,8 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
   for (size_t i = 6; i < 8; ++i) {
     ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
     EXPECT_EQ(1u, output.num_channels_);
-    EXPECT_EQ(expected_type[i - 1], type);
-    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+    EXPECT_EQ(expected_type[i - 1], output.speech_type_);
+    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
     EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&timestamp));
     EXPECT_EQ(timestamp, last_timestamp + expected_timestamp_increment[i]);
     last_timestamp = timestamp;
@@ -848,10 +844,9 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
                 neteq_->InsertPacket(rtp_header, payload, kReceiveTime));

   AudioFrame output;
-  NetEqOutputType type;
   // First call to GetAudio will try to decode the "faulty" packet.
   // Expect kFail return value...
-  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output));
   // ... and kOtherDecoderError error code.
   EXPECT_EQ(NetEq::kOtherDecoderError, neteq_->LastError());
   // Output size and number of channels should be correct.
@@ -861,7 +856,7 @@

   // Second call to GetAudio will decode the packet that is ok. No errors are
   // expected.
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   EXPECT_EQ(kExpectedOutputSize, output.samples_per_channel_ * kChannels);
   EXPECT_EQ(kChannels, output.num_channels_);
 }
@@ -954,11 +949,10 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
   // Pull audio once.
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   AudioFrame output;
-  NetEqOutputType type;
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

   EXPECT_CALL(mock_decoder, Die());
 }
@@ -1047,33 +1041,31 @@ TEST_F(NetEqImplTest, DecodingError) {
   // Pull audio.
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   AudioFrame output;
-  NetEqOutputType type;
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

   // Pull audio again. Decoder fails.
-  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output));
   EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
   EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  // TODO(minyue): should NetEq better give kOutputPLC, since it is actually an
-  // expansion.
-  EXPECT_EQ(kOutputNormal, type);
+  // We are not expecting anything for output.speech_type_, since an error was
+  // returned.

   // Pull audio again, should continue an expansion.
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputPLC, type);
+  EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);

   // Pull audio again, should behave normal.
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

   EXPECT_CALL(mock_decoder, Die());
 }
@@ -1158,27 +1150,25 @@ TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
   // Pull audio.
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   AudioFrame output;
-  NetEqOutputType type;
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputCNG, type);
+  EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);

   // Pull audio again. Decoder fails.
-  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output));
   EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
   EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  // TODO(minyue): should NetEq better give kOutputPLC, since it is actually an
-  // expansion.
-  EXPECT_EQ(kOutputCNG, type);
+  // We are not expecting anything for output.speech_type_, since an error was
+  // returned.

   // Pull audio again, should resume codec CNG.
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
   EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
   EXPECT_EQ(1u, output.num_channels_);
-  EXPECT_EQ(kOutputCNG, type);
+  EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);

   EXPECT_CALL(mock_decoder, Die());
 }
diff --git a/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
index 91ae4d0f3d..770ebd5783 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -177,7 +177,6 @@ struct NetEqNetworkStatsCheck {
   }

   void RunTest(int num_loops, NetEqNetworkStatsCheck expects) {
-    NetEqOutputType output_type;
     uint32_t time_now;
     uint32_t next_send_time;

@@ -195,7 +194,7 @@ struct NetEqNetworkStatsCheck {
           InsertPacket(rtp_header_, payload_, next_send_time);
         }
       }
-      GetOutputAudio(&output_frame_, &output_type);
+      GetOutputAudio(&output_frame_);
       time_now += kOutputLengthMs;
     }
     CheckNetworkStatistics(expects);
diff --git a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index 5beeeea909..4ee17d2a44 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -211,14 +211,12 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
         next_arrival_time = GetArrivalTime(next_send_time);
       } while (Lost());  // If lost, immediately read the next packet.
     }
-    NetEqOutputType output_type;
     // Get audio from mono instance.
-    EXPECT_EQ(NetEq::kOK, neteq_mono_->GetAudio(&output_, &output_type));
+    EXPECT_EQ(NetEq::kOK, neteq_mono_->GetAudio(&output_));
     EXPECT_EQ(1u, output_.num_channels_);
     EXPECT_EQ(output_size_samples_, output_.samples_per_channel_);
     // Get audio from multi-channel instance.
-    ASSERT_EQ(NetEq::kOK,
-              neteq_->GetAudio(&output_multi_channel_, &output_type));
+    ASSERT_EQ(NetEq::kOK, neteq_->GetAudio(&output_multi_channel_));
     EXPECT_EQ(num_channels_, output_multi_channel_.num_channels_);
     EXPECT_EQ(output_size_samples_,
               output_multi_channel_.samples_per_channel_);
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 8d401a257a..340cf581b6 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -426,8 +426,7 @@ void NetEqDecodingTest::Process() {
   }

   // Get audio from NetEq.
-  NetEqOutputType type;
-  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
   ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) ||
               (out_frame_.samples_per_channel_ == kBlockSize16kHz) ||
               (out_frame_.samples_per_channel_ == kBlockSize32kHz) ||
@@ -611,8 +610,7 @@ TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
   }
   // Pull out all data.
   for (size_t i = 0; i < num_frames; ++i) {
-    NetEqOutputType type;
-    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
     ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
   }

@@ -653,8 +651,7 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
   }

   // Pull out data once.
-  NetEqOutputType type;
-  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
   ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
 }

@@ -681,8 +678,7 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
   }

   // Pull out data once.
-  NetEqOutputType type;
-  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
   ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
 }

@@ -703,7 +699,6 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
   const size_t kPayloadBytes = kSamples * 2;
   double next_input_time_ms = 0.0;
   double t_ms;
-  NetEqOutputType type;

   // Insert speech for 5 seconds.
   const int kSpeechDurationMs = 5000;
@@ -720,11 +715,11 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
       next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
     }
     // Pull out data once.
-    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
     ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
   }

-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
   int32_t delay_before = timestamp - PlayoutTimestamp();

   // Insert CNG for 1 minute (= 60000 ms).
@@ -747,11 +742,11 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
       next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
     }
     // Pull out data once.
-    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
     ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
   }

-  EXPECT_EQ(kOutputCNG, type);
+  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

   if (network_freeze_ms > 0) {
     // First keep pulling audio for |network_freeze_ms| without inserting
@@ -760,9 +755,9 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
     const double loop_end_time = t_ms + network_freeze_ms;
     for (; t_ms < loop_end_time; t_ms += 10) {
       // Pull out data once.
-      ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+      ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
       ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
-      EXPECT_EQ(kOutputCNG, type);
+      EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
     }
     bool pull_once = pull_audio_during_freeze;
     // If |pull_once| is true, GetAudio will be called once half-way through
@@ -772,9 +767,9 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
       if (pull_once && next_input_time_ms >= pull_time_ms) {
        pull_once = false;
        // Pull out data once.
-        ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+        ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
        ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
-        EXPECT_EQ(kOutputCNG, type);
+        EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
        t_ms += 10;
      }
      // Insert one CNG frame each 100 ms.
@@ -793,7 +788,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,

   // Insert speech again until output type is speech.
   double speech_restart_time_ms = t_ms;
-  while (type != kOutputNormal) {
+  while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
     // Each turn in this for loop is 10 ms.
     while (next_input_time_ms <= t_ms) {
       // Insert one 30 ms speech frame.
@@ -806,7 +801,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
       next_input_time_ms += kFrameSizeMs * drift_factor;
     }
     // Pull out data once.
-    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
     ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
     // Increase clock.
     t_ms += 10;
@@ -927,13 +922,12 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
   PopulateRtpInfo(0, 0, &rtp_info);
   rtp_info.header.payloadType = 103;  // iSAC, but the payload is invalid.
   EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
-  NetEqOutputType type;
   // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
   // to GetAudio.
   for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
     out_frame_.data_[i] = 1;
   }
-  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &type));
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_));

   // Verify that there is a decoder error to check.
   EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
@@ -965,13 +959,12 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
 }

 TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
-  NetEqOutputType type;
   // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
   // to GetAudio.
   for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
     out_frame_.data_[i] = 1;
   }
-  EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+  EXPECT_EQ(0, neteq_->GetAudio(&out_frame_));
   // Verify that the first block of samples is set to 0.
   static const int kExpectedOutputLength =
       kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
@@ -1006,7 +999,6 @@ class NetEqBgnTest : public NetEqDecodingTest {
         ASSERT_TRUE(false);  // Unsupported test case.
     }

-    NetEqOutputType type;
    AudioFrame output;
    test::AudioLoop input;
    // We are using the same 32 kHz input file for all tests, regardless of
@@ -1035,10 +1027,10 @@
                                          payload, enc_len_bytes),
                            receive_timestamp));

      output.Reset();
-      ASSERT_EQ(0, neteq_->GetAudio(&output, &type));
+      ASSERT_EQ(0, neteq_->GetAudio(&output));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
-      ASSERT_EQ(kOutputNormal, type);
+      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);

      // Next packet.
      rtp_info.header.timestamp += expected_samples_per_channel;
@@ -1051,7 +1043,7 @@
    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
    // one frame without checking speech-type. This is the first frame pulled
    // without inserting any packet, and might not be labeled as PLC.
-    ASSERT_EQ(0, neteq_->GetAudio(&output, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&output));
    ASSERT_EQ(1u, output.num_channels_);
    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
@@ -1066,10 +1058,10 @@
    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
      output.Reset();
      memset(output.data_, 1, sizeof(output.data_));  // Set to non-zero.
-      ASSERT_EQ(0, neteq_->GetAudio(&output, &type));
+      ASSERT_EQ(0, neteq_->GetAudio(&output));
      ASSERT_EQ(1u, output.num_channels_);
      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
-      if (type == kOutputPLCtoCNG) {
+      if (output.speech_type_ == AudioFrame::kPLCCNG) {
        plc_to_cng = true;
        double sum_squared = 0;
        for (size_t k = 0;
@@ -1077,7 +1069,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
          sum_squared += output.data_[k] * output.data_[k];
        TestCondition(sum_squared, n > kFadingThreshold);
      } else {
-        EXPECT_EQ(kOutputPLC, type);
+        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
      }
    }
    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
@@ -1239,11 +1231,10 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
   }
   // Insert some packets which decode to noise. We are not interested in
   // actual decoded values.
-  NetEqOutputType output_type;
   uint32_t receive_timestamp = 0;
   for (int n = 0; n < 100; ++n) {
     ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
-    ASSERT_EQ(0, neteq_->GetAudio(&output, &output_type));
+    ASSERT_EQ(0, neteq_->GetAudio(&output));
     ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
     ASSERT_EQ(1u, output.num_channels_);

@@ -1259,7 +1250,7 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
   // Insert sync-packets, the decoded sequence should be all-zero.
   for (int n = 0; n < kNumSyncPackets; ++n) {
     ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
-    ASSERT_EQ(0, neteq_->GetAudio(&output, &output_type));
+    ASSERT_EQ(0, neteq_->GetAudio(&output));
     ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
     ASSERT_EQ(1u, output.num_channels_);
     if (n > algorithmic_frame_delay) {
@@ -1275,7 +1266,7 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
   // network statistics would show some packet loss.
   for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) {
     ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
-    ASSERT_EQ(0, neteq_->GetAudio(&output, &output_type));
+    ASSERT_EQ(0, neteq_->GetAudio(&output));
     if (n >= algorithmic_frame_delay + 1) {
       // Expect that this frame contain samples from regular RTP.
       EXPECT_TRUE(IsAllNonZero(
@@ -1309,12 +1300,11 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
   }
   // Insert some packets which decode to noise. We are not interested in
   // actual decoded values.
-  NetEqOutputType output_type;
   uint32_t receive_timestamp = 0;
   int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
   for (int n = 0; n < algorithmic_frame_delay; ++n) {
     ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
-    ASSERT_EQ(0, neteq_->GetAudio(&output, &output_type));
+    ASSERT_EQ(0, neteq_->GetAudio(&output));
     ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
     ASSERT_EQ(1u, output.num_channels_);
     rtp_info.header.sequenceNumber++;
@@ -1351,7 +1341,7 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
   // Decode.
   for (int n = 0; n < kNumSyncPackets; ++n) {
-    ASSERT_EQ(0, neteq_->GetAudio(&output, &output_type));
+    ASSERT_EQ(0, neteq_->GetAudio(&output));
     ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
     ASSERT_EQ(1u, output.num_channels_);
     EXPECT_TRUE(IsAllNonZero(
@@ -1418,8 +1408,7 @@ void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
     }

     // Pull out data once.
     AudioFrame output;
-    NetEqOutputType output_type;
-    ASSERT_EQ(0, neteq_->GetAudio(&output, &output_type));
+    ASSERT_EQ(0, neteq_->GetAudio(&output));
     ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
     ASSERT_EQ(1u, output.num_channels_);

@@ -1471,7 +1460,6 @@ void NetEqDecodingTest::DuplicateCng() {
                    algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
   // Insert three speech packets. Three are needed to get the frame length
   // correct.
-  NetEqOutputType type;
   uint8_t payload[kPayloadBytes] = {0};
   WebRtcRTPHeader rtp_info;
   for (int i = 0; i < 3; ++i) {
@@ -1481,11 +1469,11 @@ void NetEqDecodingTest::DuplicateCng() {
     timestamp += kSamples;

     // Pull audio once.
-    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
     ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
   }
   // Verify speech output.
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);

   // Insert same CNG packet twice.
   const int kCngPeriodMs = 100;
@@ -1498,9 +1486,9 @@ void NetEqDecodingTest::DuplicateCng() {
                    rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));

   // Pull audio once and make sure CNG is played.
-  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
   ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
-  EXPECT_EQ(kOutputCNG, type);
+  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
   EXPECT_EQ(timestamp - algorithmic_delay_samples, PlayoutTimestamp());

   // Insert the same CNG packet again. Note that at this point it is old, since
@@ -1512,9 +1500,9 @@ void NetEqDecodingTest::DuplicateCng() {
   // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
   // we have already pulled out CNG once.
   for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
-    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
     ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
-    EXPECT_EQ(kOutputCNG, type);
+    EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
     EXPECT_EQ(timestamp - algorithmic_delay_samples, PlayoutTimestamp());
   }

@@ -1526,9 +1514,9 @@ void NetEqDecodingTest::DuplicateCng() {
   ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));

   // Pull audio once and verify that the output is speech again.
-  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
   ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
   EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
             PlayoutTimestamp());
 }
@@ -1564,10 +1552,9 @@ TEST_F(NetEqDecodingTest, CngFirst) {
   timestamp += kCngPeriodSamples;

   // Pull audio once and make sure CNG is played.
-  NetEqOutputType type;
-  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
   ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
-  EXPECT_EQ(kOutputCNG, type);
+  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);

   // Insert some speech packets.
   for (int i = 0; i < 3; ++i) {
@@ -1577,11 +1564,11 @@ TEST_F(NetEqDecodingTest, CngFirst) {
     timestamp += kSamples;

     // Pull audio once.
-    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &type));
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
     ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
   }
   // Verify speech output.
-  EXPECT_EQ(kOutputNormal, type);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
 }

 }  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 94436e193c..2608d9a03b 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -43,10 +43,9 @@ void NetEqExternalDecoderTest::InsertPacket(
                 neteq_->InsertPacket(rtp_header, payload, receive_timestamp));
 }

-void NetEqExternalDecoderTest::GetOutputAudio(AudioFrame* output,
-                                              NetEqOutputType* output_type) {
+void NetEqExternalDecoderTest::GetOutputAudio(AudioFrame* output) {
   // Get audio from regular instance.
-  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(output, output_type));
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(output));
   EXPECT_EQ(channels_, output->num_channels_);
   EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
             output->samples_per_channel_);
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
index bd9f01aae4..8999d027ab 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -43,7 +43,7 @@ class NetEqExternalDecoderTest {
                     uint32_t receive_timestamp);

   // Get 10 ms of audio data.
-  void GetOutputAudio(AudioFrame* output, NetEqOutputType* output_type);
+  void GetOutputAudio(AudioFrame* output);

   NetEq* neteq() { return neteq_.get(); }

diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index f1577df26f..32c085d3e0 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -105,7 +105,7 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,

     // Get output audio, but don't do anything with it.
     AudioFrame out_frame;
-    int error = neteq->GetAudio(&out_frame, NULL);
+    int error = neteq->GetAudio(&out_frame);
     if (error != NetEq::kOK)
       return -1;

diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index 1155987706..5f874ad8db 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -391,7 +391,7 @@ int NetEqQualityTest::Transmit() {
 }

 int NetEqQualityTest::DecodeBlock() {
-  int ret = neteq_->GetAudio(&out_frame_, NULL);
+  int ret = neteq_->GetAudio(&out_frame_);

   if (ret != NetEq::kOK) {
     return -1;
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index a339199e2a..fdb66714cf 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -605,7 +605,7 @@ int main(int argc, char* argv[]) {
     // Check if it is time to get output audio.
     while (time_now_ms >= next_output_time_ms && output_event_available) {
       webrtc::AudioFrame out_frame;
-      int error = neteq->GetAudio(&out_frame, NULL);
+      int error = neteq->GetAudio(&out_frame);
       if (error != NetEq::kOK) {
        std::cerr << "GetAudio returned error code " << neteq->LastError()
                  << std::endl;
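For orientation, and not as part of the patch itself: after this change a caller pulls 10 ms of audio with the single-argument GetAudio() and reads the decode result from the AudioFrame, instead of receiving it through the removed NetEqOutputType out-parameter. A minimal sketch follows, assuming an already-created and configured NetEq instance; the second include path and the helper function name are assumptions for illustration only.

// Sketch only: |neteq| is assumed to have been created via NetEq::Create()
// and to be fed RTP packets elsewhere. Include paths are illustrative.
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/include/module_common_types.h"  // AudioFrame (assumed path).

void PullTenMsOfAudio(webrtc::NetEq* neteq) {
  webrtc::AudioFrame frame;
  if (neteq->GetAudio(&frame) != webrtc::NetEq::kOK) {
    // On failure some fields of |frame| may not have been updated.
    return;
  }
  // speech_type_ and vad_activity_ now carry what NetEqOutputType used to
  // report (normal speech, PLC, CNG, PLC-to-CNG, VAD passive).
  if (frame.speech_type_ == webrtc::AudioFrame::kPLC) {
    // The last 10 ms were produced by packet-loss concealment.
  }
}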