diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc
index db6f125629..a2d08ac004 100644
--- a/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/modules/audio_coding/acm2/audio_coding_module.cc
@@ -37,6 +37,8 @@ namespace {
 // 48 kHz data.
 constexpr size_t kInitialInputDataBufferSize = 6 * 480;
 
+constexpr int32_t kMaxInputSampleRateHz = 192000;
+
 class AudioCodingModuleImpl final : public AudioCodingModule {
  public:
   explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
@@ -346,7 +348,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
     return -1;
   }
 
-  if (audio_frame.sample_rate_hz_ > 192000) {
+  if (audio_frame.sample_rate_hz_ > kMaxInputSampleRateHz) {
     assert(false);
     RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, input frequency not valid";
     return -1;
@@ -463,20 +465,25 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
   *ptr_out = &preprocess_frame_;
   preprocess_frame_.num_channels_ = in_frame.num_channels_;
   preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
-  std::array<int16_t, WEBRTC_10MS_PCM_AUDIO> audio;
-  const int16_t* src_ptr_audio = in_frame.data();
+  std::array<int16_t, AudioFrame::kMaxDataSizeSamples> audio;
+  const int16_t* src_ptr_audio;
   if (down_mix) {
-    // If a resampling is required the output of a down-mix is written into a
+    // If a resampling is required, the output of a down-mix is written into a
     // local buffer, otherwise, it will be written to the output frame.
     int16_t* dest_ptr_audio =
         resample ? audio.data() : preprocess_frame_.mutable_data();
+    RTC_DCHECK_GE(audio.size(), preprocess_frame_.samples_per_channel_);
     RTC_DCHECK_GE(audio.size(), in_frame.samples_per_channel_);
     DownMixFrame(in_frame,
                  rtc::ArrayView<int16_t>(
                      dest_ptr_audio, preprocess_frame_.samples_per_channel_));
     preprocess_frame_.num_channels_ = 1;
-    // Set the input of the resampler is the down-mixed signal.
+
+    // Set the input of the resampler to the down-mixed signal.
     src_ptr_audio = audio.data();
+  } else {
+    // Set the input of the resampler to the original data.
+    src_ptr_audio = in_frame.data();
   }
 
   preprocess_frame_.timestamp_ = expected_codec_ts_;
diff --git a/modules/audio_coding/include/audio_coding_module.h b/modules/audio_coding/include/audio_coding_module.h
index a0aa5d05ec..102e2de83b 100644
--- a/modules/audio_coding/include/audio_coding_module.h
+++ b/modules/audio_coding/include/audio_coding_module.h
@@ -33,8 +33,6 @@ class AudioEncoder;
 class AudioFrame;
 struct RTPHeader;
 
-#define WEBRTC_10MS_PCM_AUDIO 960  // 16 bits super wideband 48 kHz
-
 // Callback class used for sending data ready to be packetized
 class AudioPacketizationCallback {
  public:
diff --git a/modules/audio_coding/test/EncodeDecodeTest.cc b/modules/audio_coding/test/EncodeDecodeTest.cc
index a1c005cbba..3ed1789e93 100644
--- a/modules/audio_coding/test/EncodeDecodeTest.cc
+++ b/modules/audio_coding/test/EncodeDecodeTest.cc
@@ -24,6 +24,12 @@
 
 namespace webrtc {
 
+namespace {
+// Buffer size for 10 ms of stereo 48 kHz audio.
+constexpr size_t kWebRtc10MsPcmAudio = 960;
+
+}  // namespace
+
 TestPacketization::TestPacketization(RTPStream* rtpStream, uint16_t frequency)
     : _rtpStream(rtpStream), _frequency(frequency), _seqNo(0) {}
 
@@ -92,7 +98,7 @@ void Sender::Run() {
 }
 
 Receiver::Receiver()
-    : _playoutLengthSmpls(WEBRTC_10MS_PCM_AUDIO),
+    : _playoutLengthSmpls(kWebRtc10MsPcmAudio),
       _payloadSizeBytes(MAX_INCOMING_PAYLOAD) {}
 
 void Receiver::Setup(AudioCodingModule* acm,
@@ -139,7 +145,7 @@ void Receiver::Setup(AudioCodingModule* acm,
   _pcmFile.Open(file_name, 32000, "wb+");
 
   _realPayloadSizeBytes = 0;
-  _playoutBuffer = new int16_t[WEBRTC_10MS_PCM_AUDIO];
+  _playoutBuffer = new int16_t[kWebRtc10MsPcmAudio];
   _frequency = playSampFreq;
   _acm = acm;
   _firstTime = true;
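
Note (not part of the patch): the 960 in kWebRtc10MsPcmAudio corresponds to 10 ms of stereo 48 kHz PCM, i.e. 480 samples per channel times two channels. A minimal standalone C++ sketch of that arithmetic follows; the constant names in it are local to the example and do not exist in the WebRTC tree.

  // Illustrative only: relates the 960-sample figure to 10 ms of stereo 48 kHz PCM.
  #include <cstddef>

  constexpr std::size_t kSampleRateHz = 48000;    // assumed playout rate
  constexpr std::size_t kNumChannels = 2;         // stereo
  constexpr std::size_t kFrameDurationMs = 10;    // one PlayoutData10Ms() call
  constexpr std::size_t kSamplesPer10MsStereo =
      kSampleRateHz / 1000 * kFrameDurationMs * kNumChannels;  // 480 * 2 = 960

  static_assert(kSamplesPer10MsStereo == 960,
                "must match kWebRtc10MsPcmAudio in EncodeDecodeTest.cc");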