diff --git a/modules/audio_device/android/aaudio_player.cc b/modules/audio_device/android/aaudio_player.cc
index f8af965314..cdc82fdd9c 100644
--- a/modules/audio_device/android/aaudio_player.cc
+++ b/modules/audio_device/android/aaudio_player.cc
@@ -122,7 +122,7 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // size per callback used by AAudio. Use an initial capacity of 50ms to ensure
   // that the buffer can cache old data and at the same time be prepared for
   // increased burst size in AAudio if buffer underruns are detected.
-  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+  const size_t capacity = 5 * audio_parameters.frames_per_10ms_buffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
       audio_device_buffer_, audio_parameters.sample_rate(), capacity));
 }
@@ -184,16 +184,16 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
   // Read audio data from the WebRTC source using the FineAudioBuffer object
   // and write that data into |audio_data| to be played out by AAudio.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   // Prime output with zeros during a short initial phase to avoid distortion.
   // TODO(henrika): do more work to figure out of if the initial forced silence
   // period is really needed.
   if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+    const size_t num_bytes =
+        sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
     memset(audio_data, 0, num_bytes);
   } else {
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<int8_t>(static_cast<int8_t*>(audio_data), num_bytes),
+        rtc::MakeArrayView(static_cast<int16_t*>(audio_data), num_frames),
         static_cast<int>(latency_millis_ + 0.5));
   }
diff --git a/modules/audio_device/android/aaudio_recorder.cc b/modules/audio_device/android/aaudio_recorder.cc
index 346707b3f7..10fcb4d848 100644
--- a/modules/audio_device/android/aaudio_recorder.cc
+++ b/modules/audio_device/android/aaudio_recorder.cc
@@ -180,11 +180,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
   }
   // Copy recorded audio in |audio_data| to the WebRTC sink using the
   // FineAudioBuffer object.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(static_cast<const int8_t*>(audio_data),
-                                   num_bytes),
+      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), num_frames),
       static_cast<int>(latency_millis_ + 0.5));

   return AAUDIO_CALLBACK_RESULT_CONTINUE;
diff --git a/modules/audio_device/android/opensles_player.cc b/modules/audio_device/android/opensles_player.cc
index d177c87f58..4e0193b96c 100644
--- a/modules/audio_device/android/opensles_player.cc
+++ b/modules/audio_device/android/opensles_player.cc
@@ -212,16 +212,16 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   // recommended to construct audio buffers so that they contain an exact
   // multiple of this number. If so, callbacks will occur at regular intervals,
   // which reduces jitter.
-  const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer();
-  ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes);
+  const size_t buffer_size_in_samples = audio_parameters_.frames_per_buffer();
+  ALOGD("native buffer size: %" PRIuS, buffer_size_in_samples);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
-                                               audio_parameters_.sample_rate(),
-                                               2 * buffer_size_in_bytes));
+  fine_audio_buffer_.reset(
+      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]);
+    audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
   }
 }
@@ -393,13 +393,14 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
   }
   last_play_time_ = current_time;
-  SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
+  SLint8* audio_ptr8 =
+      reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
   if (silence) {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
     // Avoid aquiring real audio data from WebRTC and fill the buffer with
     // zeros instead. Used to prime the buffer with silence and to avoid asking
     // for audio data from two different threads.
-    memset(audio_ptr, 0, audio_parameters_.GetBytesPerBuffer());
+    memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
   } else {
     RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
     // Read audio data from the WebRTC source using the FineAudioBuffer object
@@ -407,13 +408,13 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     // OpenSL ES. Use hardcoded delay estimate since OpenSL ES does not support
     // delay estimation.
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<int8_t>(audio_ptr,
-                               audio_parameters_.GetBytesPerBuffer()),
+        rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
+                                audio_parameters_.frames_per_buffer()),
         25);
   }
   // Enqueue the decoded audio buffer for playback.
   SLresult err = (*simple_buffer_queue_)
-                     ->Enqueue(simple_buffer_queue_, audio_ptr,
+                     ->Enqueue(simple_buffer_queue_, audio_ptr8,
                                audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %d", err);
diff --git a/modules/audio_device/android/opensles_player.h b/modules/audio_device/android/opensles_player.h
index 167c356330..20107585a6 100644
--- a/modules/audio_device/android/opensles_player.h
+++ b/modules/audio_device/android/opensles_player.h
@@ -143,9 +143,8 @@ class OpenSLESPlayer {
   SLDataFormat_PCM pcm_format_;

   // Queue of audio buffers to be used by the player object for rendering
-  // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by FineAudioBuffer::RequiredBufferSizeBytes().
-  std::unique_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
+  // audio.
+  std::unique_ptr<SLint16[]> audio_buffers_[kNumOfOpenSLESBuffers];

   // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
   // in chunks of 10ms. It then allows for this data to be pulled in
diff --git a/modules/audio_device/android/opensles_recorder.cc b/modules/audio_device/android/opensles_recorder.cc
index 446ef98910..0070e73f8b 100644
--- a/modules/audio_device/android/opensles_recorder.cc
+++ b/modules/audio_device/android/opensles_recorder.cc
@@ -344,12 +344,12 @@ void OpenSLESRecorder::AllocateDataBuffers() {
   RTC_DCHECK(audio_device_buffer_);
   fine_audio_buffer_.reset(
       new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.GetBytesPerBuffer()));
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocate queue of audio buffers that stores recorded audio samples.
-  const int data_size_bytes = audio_parameters_.GetBytesPerBuffer();
-  audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]);
+  const int data_size_samples = audio_parameters_.frames_per_buffer();
+  audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[data_size_bytes]);
+    audio_buffers_[i].reset(new SLint16[data_size_samples]);
   }
 }
@@ -374,12 +374,12 @@ void OpenSLESRecorder::ReadBufferQueue() {
   // since there is no support to turn off built-in EC in combination with
   // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
   // these estimates) will never be active.
-  const size_t size_in_bytes =
-      static_cast<size_t>(audio_parameters_.GetBytesPerBuffer());
-  const int8_t* data =
-      static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
+  const size_t size_in_samples =
+      static_cast<size_t>(audio_parameters_.frames_per_buffer());
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(data, size_in_bytes), 25);
+      rtc::ArrayView<const int16_t>(audio_buffers_[buffer_index_].get(),
+                                    size_in_samples),
+      25);
   // Enqueue the utilized audio buffer and use if for recording again.
   EnqueueAudioBuffer();
 }
@@ -387,8 +387,10 @@
 bool OpenSLESRecorder::EnqueueAudioBuffer() {
   SLresult err =
       (*simple_buffer_queue_)
-          ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(),
-                    audio_parameters_.GetBytesPerBuffer());
+          ->Enqueue(
+              simple_buffer_queue_,
+              reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get()),
+              audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %s", GetSLErrorString(err));
     return false;
diff --git a/modules/audio_device/android/opensles_recorder.h b/modules/audio_device/android/opensles_recorder.h
index 478c0340af..ee1ede51d5 100644
--- a/modules/audio_device/android/opensles_recorder.h
+++ b/modules/audio_device/android/opensles_recorder.h
@@ -176,9 +176,9 @@ class OpenSLESRecorder {
   // Queue of audio buffers to be used by the recorder object for capturing
   // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by AudioParameters::GetBytesPerBuffer(), i.e., it corresponds to
+  // is given by AudioParameters::frames_per_buffer(), i.e., it corresponds to
   // the native OpenSL ES buffer size.
-  std::unique_ptr<std::unique_ptr<SLint8[]>[]> audio_buffers_;
+  std::unique_ptr<std::unique_ptr<SLint16[]>[]> audio_buffers_;

   // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
   // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
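Every hunk up to this point makes the same mechanical substitution: buffer sizes move from bytes (int8_t elements, GetBytesPerBuffer()/GetBytesPer10msBuffer()) to 16-bit samples (int16_t elements, frames_per_buffer()/frames_per_10ms_buffer()). Below is a minimal standalone sketch of the two unit conventions, with std::vector<int16_t> standing in for rtc::BufferT<int16_t> and illustrative constants standing in for AudioParameters; nothing in it is code from the patch itself.

```cpp
// Standalone sketch: byte-based vs. sample-based sizing of a PCM buffer.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const int kSampleRate = 48000;  // Hz; mono, 16-bit PCM assumed.
  // One WebRTC chunk is 10ms of audio.
  const size_t kSamplesPer10Ms = kSampleRate * 10 / 1000;  // 480 samples.

  // Old convention: sizes carried in bytes, so every call site had to scale
  // by the sample width (what GetBytesPerBuffer()-style accessors returned).
  const size_t size_in_bytes = kSamplesPer10Ms * sizeof(int16_t);  // 960.

  // New convention: the element type is int16_t, so sizes are plain sample
  // counts and no sizeof(int16_t) scaling is needed at call sites.
  std::vector<int16_t> buffer(kSamplesPer10Ms);

  std::cout << "bytes: " << size_in_bytes << ", samples: " << buffer.size()
            << '\n';
  return 0;
}
```

Letting the element type carry the sample width removes the sizeof(int16_t) factors scattered through the call sites, which is exactly the class of conversion this patch deletes.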
diff --git a/modules/audio_device/fine_audio_buffer.cc b/modules/audio_device/fine_audio_buffer.cc
index c1f12855e5..2fae686d28 100644
--- a/modules/audio_device/fine_audio_buffer.cc
+++ b/modules/audio_device/fine_audio_buffer.cc
@@ -26,7 +26,6 @@ FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
     : device_buffer_(device_buffer),
       sample_rate_(sample_rate),
       samples_per_10_ms_(static_cast<size_t>(sample_rate_ * 10 / 1000)),
-      bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
       playout_buffer_(0, capacity),
       record_buffer_(0, capacity) {
   RTC_LOG(INFO) << "samples_per_10_ms_: " << samples_per_10_ms_;
@@ -42,52 +41,53 @@ void FineAudioBuffer::ResetRecord() {
   record_buffer_.Clear();
 }

-void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer,
+void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                                      int playout_delay_ms) {
   // Ask WebRTC for new data in chunks of 10ms until we have enough to
   // fulfill the request. It is possible that the buffer already contains
   // enough samples from the last round.
-  const size_t num_bytes = audio_buffer.size();
-  while (playout_buffer_.size() < num_bytes) {
+  while (playout_buffer_.size() < audio_buffer.size()) {
     // Get 10ms decoded audio from WebRTC.
     device_buffer_->RequestPlayoutData(samples_per_10_ms_);
     // Append |bytes_per_10_ms_| elements to the end of the buffer.
-    const size_t bytes_written = playout_buffer_.AppendData(
-        bytes_per_10_ms_, [&](rtc::ArrayView<int8_t> buf) {
+    const size_t samples_written = playout_buffer_.AppendData(
+        samples_per_10_ms_, [&](rtc::ArrayView<int16_t> buf) {
          const size_t samples_per_channel =
              device_buffer_->GetPlayoutData(buf.data());
          // TODO(henrika): this class is only used on mobile devices and is
          // currently limited to mono. Modifications are needed for stereo.
-          return sizeof(int16_t) * samples_per_channel;
+          return samples_per_channel;
        });
-    RTC_DCHECK_EQ(bytes_per_10_ms_, bytes_written);
+    RTC_DCHECK_EQ(samples_per_10_ms_, samples_written);
   }
+
+  const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
   // Provide the requested number of bytes to the consumer.
   memcpy(audio_buffer.data(), playout_buffer_.data(), num_bytes);
   // Move remaining samples to start of buffer to prepare for next round.
-  memmove(playout_buffer_.data(), playout_buffer_.data() + num_bytes,
-          playout_buffer_.size() - num_bytes);
-  playout_buffer_.SetSize(playout_buffer_.size() - num_bytes);
+  memmove(playout_buffer_.data(), playout_buffer_.data() + audio_buffer.size(),
+          (playout_buffer_.size() - audio_buffer.size()) * sizeof(int16_t));
+  playout_buffer_.SetSize(playout_buffer_.size() - audio_buffer.size());
   // Cache playout latency for usage in DeliverRecordedData();
   playout_delay_ms_ = playout_delay_ms;
 }

 void FineAudioBuffer::DeliverRecordedData(
-    rtc::ArrayView<const int8_t> audio_buffer,
+    rtc::ArrayView<const int16_t> audio_buffer,
     int record_delay_ms) {
   // Always append new data and grow the buffer if needed.
   record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
   // Consume samples from buffer in chunks of 10ms until there is not
-  // enough data left. The number of remaining bytes in the cache is given by
+  // enough data left. The number of remaining samples in the cache is given by
   // the new size of the buffer.
-  while (record_buffer_.size() >= bytes_per_10_ms_) {
+  while (record_buffer_.size() >= samples_per_10_ms_) {
     device_buffer_->SetRecordedBuffer(record_buffer_.data(),
                                       samples_per_10_ms_);
     device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
     device_buffer_->DeliverRecordedData();
-    memmove(record_buffer_.data(), record_buffer_.data() + bytes_per_10_ms_,
-            record_buffer_.size() - bytes_per_10_ms_);
-    record_buffer_.SetSize(record_buffer_.size() - bytes_per_10_ms_);
+    memmove(record_buffer_.data(), record_buffer_.data() + samples_per_10_ms_,
+            (record_buffer_.size() - samples_per_10_ms_) * sizeof(int16_t));
+    record_buffer_.SetSize(record_buffer_.size() - samples_per_10_ms_);
   }
 }
diff --git a/modules/audio_device/fine_audio_buffer.h b/modules/audio_device/fine_audio_buffer.h
index fb80e98842..5bc42ba214 100644
--- a/modules/audio_device/fine_audio_buffer.h
+++ b/modules/audio_device/fine_audio_buffer.h
@@ -21,12 +21,13 @@ namespace webrtc {

 class AudioDeviceBuffer;

-// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with audio data
-// corresponding to 10ms of data. It then allows for this data to be pulled in
-// a finer or coarser granularity. I.e. interacting with this class instead of
-// directly with the AudioDeviceBuffer one can ask for any number of audio data
-// samples. This class also ensures that audio data can be delivered to the ADB
-// in 10ms chunks when the size of the provided audio buffers differs from 10ms.
+// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with 16-bit PCM
+// audio samples corresponding to 10ms of data. It then allows for this data
+// to be pulled in a finer or coarser granularity. I.e. interacting with this
+// class instead of directly with the AudioDeviceBuffer one can ask for any
+// number of audio data samples. This class also ensures that audio data can be
+// delivered to the ADB in 10ms chunks when the size of the provided audio
+// buffers differs from 10ms.
 // As an example: calling DeliverRecordedData() with 5ms buffers will deliver
 // accumulated 10ms worth of data to the ADB every second call.
 // TODO(henrika): add support for stereo when mobile platforms need it.
@@ -42,7 +43,7 @@ class FineAudioBuffer {
                   size_t capacity);
   ~FineAudioBuffer();

-  // Clears buffers and counters dealing with playour and/or recording.
+  // Clears buffers and counters dealing with playout and/or recording.
   void ResetPlayout();
   void ResetRecord();
@@ -52,7 +53,7 @@ class FineAudioBuffer {
   // silence instead. The provided delay estimate in |playout_delay_ms| should
   // contain an estime of the latency between when an audio frame is read from
   // WebRTC and when it is played out on the speaker.
-  void GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer,
+  void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                       int playout_delay_ms);

   // Consumes the audio data in |audio_buffer| and sends it to the WebRTC layer
@@ -62,9 +63,9 @@ class FineAudioBuffer {
   // They can be fixed values on most platforms and they are ignored if an
   // external (hardware/built-in) AEC is used.
   // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
-  // 5ms of data and sends a total of 10ms to WebRTC and clears the intenal
+  // 5ms of data and sends a total of 10ms to WebRTC and clears the internal
   // cache. Call #3 restarts the scheme above.
-  void DeliverRecordedData(rtc::ArrayView<const int8_t> audio_buffer,
+  void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer,
                            int record_delay_ms);

  private:
@@ -79,14 +80,12 @@ class FineAudioBuffer {
   const int sample_rate_;
   // Number of audio samples per 10ms.
   const size_t samples_per_10_ms_;
-  // Number of audio bytes per 10ms.
-  const size_t bytes_per_10_ms_;
   // Storage for output samples from which a consumer can read audio buffers
   // in any size using GetPlayoutData().
-  rtc::BufferT<int8_t> playout_buffer_;
+  rtc::BufferT<int16_t> playout_buffer_;
   // Storage for input samples that are about to be delivered to the WebRTC
   // ADB or remains from the last successful delivery of a 10ms audio buffer.
-  rtc::BufferT<int8_t> record_buffer_;
+  rtc::BufferT<int16_t> record_buffer_;
   // Contains latest delay estimate given to GetPlayoutData().
   int playout_delay_ms_ = 0;
 };
diff --git a/modules/audio_device/fine_audio_buffer_unittest.cc b/modules/audio_device/fine_audio_buffer_unittest.cc
index d6bf542eb8..de0cc6dc7a 100644
--- a/modules/audio_device/fine_audio_buffer_unittest.cc
+++ b/modules/audio_device/fine_audio_buffer_unittest.cc
@@ -34,7 +34,7 @@ const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
 // buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around
 // will happen.
 // |buffer| is the audio buffer to verify.
-bool VerifyBuffer(const int8_t* buffer, int buffer_number, int size) {
+bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) {
   int start_value = (buffer_number * size) % SCHAR_MAX;
   for (int i = 0; i < size; ++i) {
     if (buffer[i] != (i + start_value) % SCHAR_MAX) {
@@ -52,10 +52,9 @@ bool VerifyBuffer(const int8_t* buffer, int buffer_number, int size) {
 // |samples_per_10_ms| is the number of samples that should be written to the
 // buffer (|arg0|).
 ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
-  int8_t* buffer = static_cast<int8_t*>(arg0);
-  int bytes_per_10_ms = samples_per_10_ms * static_cast<int>(sizeof(int16_t));
-  int start_value = (iteration * bytes_per_10_ms) % SCHAR_MAX;
-  for (int i = 0; i < bytes_per_10_ms; ++i) {
+  int16_t* buffer = static_cast<int16_t*>(arg0);
+  int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
+  for (int i = 0; i < samples_per_10_ms; ++i) {
     buffer[i] = (i + start_value) % SCHAR_MAX;
   }
   return samples_per_10_ms;
@@ -63,7 +62,7 @@ ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {

 // Writes a periodic ramp pattern to the supplied |buffer|. See UpdateBuffer()
 // for details.
-void UpdateInputBuffer(int8_t* buffer, int iteration, int size) {
+void UpdateInputBuffer(int16_t* buffer, int iteration, int size) {
   int start_value = (iteration * size) % SCHAR_MAX;
   for (int i = 0; i < size; ++i) {
     buffer[i] = (i + start_value) % SCHAR_MAX;
@@ -75,18 +74,16 @@ void UpdateInputBuffer(int8_t* buffer, int iteration, int size) {
 // supplied using a buffer size that is smaller or larger than 10ms.
 // See VerifyBuffer() for details.
 ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) {
-  const int8_t* buffer = static_cast<const int8_t*>(arg0);
-  int bytes_per_10_ms = samples_per_10_ms * static_cast<int>(sizeof(int16_t));
-  int start_value = (iteration * bytes_per_10_ms) % SCHAR_MAX;
-  for (int i = 0; i < bytes_per_10_ms; ++i) {
+  const int16_t* buffer = static_cast<const int16_t*>(arg0);
+  int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
+  for (int i = 0; i < samples_per_10_ms; ++i) {
     EXPECT_EQ(buffer[i], (i + start_value) % SCHAR_MAX);
   }
   return 0;
 }

 void RunFineBufferTest(int frame_size_in_samples) {
-  const int kFrameSizeBytes =
-      frame_size_in_samples * static_cast<int>(sizeof(int16_t));
+  const int kFrameSizeSamples = frame_size_in_samples;
   const int kNumberOfFrames = 5;
   // Ceiling of integer division: 1 + ((x - 1) / y)
   const int kNumberOfUpdateBufferCalls =
@@ -118,17 +115,17 @@ void RunFineBufferTest(int frame_size_in_samples) {
       .WillRepeatedly(Return(kSamplesPer10Ms));

   FineAudioBuffer fine_buffer(&audio_device_buffer, kSampleRate,
-                              kFrameSizeBytes);
-  std::unique_ptr<int8_t[]> out_buffer(new int8_t[kFrameSizeBytes]);
-  std::unique_ptr<int8_t[]> in_buffer(new int8_t[kFrameSizeBytes]);
+                              kFrameSizeSamples);
+  std::unique_ptr<int16_t[]> out_buffer(new int16_t[kFrameSizeSamples]);
+  std::unique_ptr<int16_t[]> in_buffer(new int16_t[kFrameSizeSamples]);

   for (int i = 0; i < kNumberOfFrames; ++i) {
     fine_buffer.GetPlayoutData(
-        rtc::ArrayView<int8_t>(out_buffer.get(), kFrameSizeBytes), 0);
-    EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeBytes));
-    UpdateInputBuffer(in_buffer.get(), i, kFrameSizeBytes);
+        rtc::ArrayView<int16_t>(out_buffer.get(), kFrameSizeSamples), 0);
+    EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeSamples));
+    UpdateInputBuffer(in_buffer.get(), i, kFrameSizeSamples);
     fine_buffer.DeliverRecordedData(
-        rtc::ArrayView<const int8_t>(in_buffer.get(), kFrameSizeBytes), 0);
+        rtc::ArrayView<const int16_t>(in_buffer.get(), kFrameSizeSamples), 0);
   }
 }
diff --git a/modules/audio_device/ios/audio_device_ios.h b/modules/audio_device/ios/audio_device_ios.h
index 460f0ef44f..c51a9799da 100644
--- a/modules/audio_device/ios/audio_device_ios.h
+++ b/modules/audio_device/ios/audio_device_ios.h
@@ -246,7 +246,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
   // On real iOS devices, the size will be fixed and set once. For iOS
   // simulators, the size can vary from callback to callback and the size
   // will be changed dynamically to account for this behavior.
-  rtc::BufferT<int8_t> record_audio_buffer_;
+  rtc::BufferT<int16_t> record_audio_buffer_;

   // Set to 1 when recording is active and 0 otherwise.
   volatile int recording_;
diff --git a/modules/audio_device/ios/audio_device_ios.mm b/modules/audio_device/ios/audio_device_ios.mm
index aa551263a4..5a6d5a9e85 100644
--- a/modules/audio_device/ios/audio_device_ios.mm
+++ b/modules/audio_device/ios/audio_device_ios.mm
@@ -360,12 +360,11 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
   // Simply return if recording is not enabled.
   if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;

-  const size_t num_bytes = num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
   // Set the size of our own audio buffer and clear it first to avoid copying
   // in combination with potential reallocations.
   // On real iOS devices, the size will only be set once (at first callback).
   record_audio_buffer_.Clear();
-  record_audio_buffer_.SetSize(num_bytes);
+  record_audio_buffer_.SetSize(num_frames);

   // Allocate AudioBuffers to be used as storage for the received audio.
   // The AudioBufferList structure works as a placeholder for the
@@ -376,8 +375,9 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
   audio_buffer_list.mNumberBuffers = 1;
   AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
   audio_buffer->mNumberChannels = record_parameters_.channels();
-  audio_buffer->mDataByteSize = record_audio_buffer_.size();
-  audio_buffer->mData = record_audio_buffer_.data();
+  audio_buffer->mDataByteSize =
+      record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
+  audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());

   // Obtain the recorded audio samples by initiating a rendering cycle.
   // Since it happens on the input bus, the |io_data| parameter is a reference
@@ -409,16 +409,13 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
   AudioBuffer* audio_buffer = &io_data->mBuffers[0];
   RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);

-  // Get pointer to internal audio buffer to which new audio data shall be
-  // written.
-  const size_t size_in_bytes = audio_buffer->mDataByteSize;
-  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
-  int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData);

   // Produce silence and give audio unit a hint about it if playout is not
   // activated.
   if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
+    const size_t size_in_bytes = audio_buffer->mDataByteSize;
+    RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
     *flags |= kAudioUnitRenderAction_OutputIsSilence;
-    memset(destination, 0, size_in_bytes);
+    memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes);
     return noErr;
   }
@@ -454,8 +451,9 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
   // the native I/O audio unit) and copy the result to the audio buffer in the
   // |io_data| destination.
-  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes),
-                                     kFixedPlayoutDelayEstimate);
+  fine_audio_buffer_->GetPlayoutData(
+      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+      kFixedPlayoutDelayEstimate);

   return noErr;
 }
@@ -704,9 +702,9 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
   // the native audio unit buffer size. Use a reasonable capacity to avoid
   // reallocations while audio is played to reduce risk of glitches.
   RTC_DCHECK(audio_device_buffer_);
-  const size_t capacity_in_bytes = 2 * playout_parameters_.GetBytesPerBuffer();
+  const size_t capacity_in_samples = 2 * playout_parameters_.frames_per_buffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_bytes));
+      audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_samples));
 }

 bool AudioDeviceIOS::CreateAudioUnit() {
diff --git a/sdk/android/src/jni/audio_device/aaudio_player.cc b/sdk/android/src/jni/audio_device/aaudio_player.cc
index 90e809626e..00164ccda8 100644
--- a/sdk/android/src/jni/audio_device/aaudio_player.cc
+++ b/sdk/android/src/jni/audio_device/aaudio_player.cc
@@ -123,7 +123,7 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // size per callback used by AAudio. Use an initial capacity of 50ms to ensure
   // that the buffer can cache old data and at the same time be prepared for
   // increased burst size in AAudio if buffer underruns are detected.
-  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+  const size_t capacity = 5 * audio_parameters.frames_per_10ms_buffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
       audio_device_buffer_, audio_parameters.sample_rate(), capacity));
 }
@@ -200,16 +200,16 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
   // Read audio data from the WebRTC source using the FineAudioBuffer object
   // and write that data into |audio_data| to be played out by AAudio.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   // Prime output with zeros during a short initial phase to avoid distortion.
   // TODO(henrika): do more work to figure out of if the initial forced silence
   // period is really needed.
   if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+    const size_t num_bytes =
+        sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
     memset(audio_data, 0, num_bytes);
   } else {
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<int8_t>(static_cast<int8_t*>(audio_data), num_bytes),
+        rtc::MakeArrayView(static_cast<int16_t*>(audio_data), num_frames),
        static_cast<int>(latency_millis_ + 0.5));
   }
diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.cc b/sdk/android/src/jni/audio_device/aaudio_recorder.cc
index f77348feb3..f1cc58cf85 100644
--- a/sdk/android/src/jni/audio_device/aaudio_recorder.cc
+++ b/sdk/android/src/jni/audio_device/aaudio_recorder.cc
@@ -192,11 +192,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
   }
   // Copy recorded audio in |audio_data| to the WebRTC sink using the
   // FineAudioBuffer object.
-  const size_t num_bytes =
-      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(static_cast<const int8_t*>(audio_data),
-                                   num_bytes),
+      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), num_frames),
       static_cast<int>(latency_millis_ + 0.5));

   return AAUDIO_CALLBACK_RESULT_CONTINUE;
diff --git a/sdk/android/src/jni/audio_device/opensles_player.cc b/sdk/android/src/jni/audio_device/opensles_player.cc
index 0a0819de2b..41ebd85353 100644
--- a/sdk/android/src/jni/audio_device/opensles_player.cc
+++ b/sdk/android/src/jni/audio_device/opensles_player.cc
@@ -222,16 +222,16 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   // recommended to construct audio buffers so that they contain an exact
   // multiple of this number. If so, callbacks will occur at regular intervals,
   // which reduces jitter.
-  const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer();
-  ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes);
+  const size_t buffer_size_in_samples = audio_parameters_.frames_per_buffer();
+  ALOGD("native buffer size: %" PRIuS, buffer_size_in_samples);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
-                                               audio_parameters_.sample_rate(),
-                                               2 * buffer_size_in_bytes));
+  fine_audio_buffer_.reset(
+      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]);
+    audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
   }
 }
@@ -403,13 +403,14 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
   }
   last_play_time_ = current_time;
-  SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
+  SLint8* audio_ptr8 =
+      reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
   if (silence) {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
     // Avoid aquiring real audio data from WebRTC and fill the buffer with
     // zeros instead. Used to prime the buffer with silence and to avoid asking
     // for audio data from two different threads.
-    memset(audio_ptr, 0, audio_parameters_.GetBytesPerBuffer());
+    memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
   } else {
     RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
     // Read audio data from the WebRTC source using the FineAudioBuffer object
@@ -417,13 +418,13 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     // OpenSL ES. Use hardcoded delay estimate since OpenSL ES does not support
     // delay estimation.
     fine_audio_buffer_->GetPlayoutData(
-        rtc::ArrayView<int8_t>(audio_ptr,
-                               audio_parameters_.GetBytesPerBuffer()),
+        rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
+                                audio_parameters_.frames_per_buffer()),
         25);
   }
   // Enqueue the decoded audio buffer for playback.
   SLresult err = (*simple_buffer_queue_)
-                     ->Enqueue(simple_buffer_queue_, audio_ptr,
+                     ->Enqueue(simple_buffer_queue_, audio_ptr8,
                                audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %d", err);
diff --git a/sdk/android/src/jni/audio_device/opensles_player.h b/sdk/android/src/jni/audio_device/opensles_player.h
index ba29bc853d..fe110a6ef2 100644
--- a/sdk/android/src/jni/audio_device/opensles_player.h
+++ b/sdk/android/src/jni/audio_device/opensles_player.h
@@ -140,9 +140,8 @@ class OpenSLESPlayer : public AudioOutput {
   SLDataFormat_PCM pcm_format_;

   // Queue of audio buffers to be used by the player object for rendering
-  // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by FineAudioBuffer::RequiredBufferSizeBytes().
-  std::unique_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
+  // audio.
+  std::unique_ptr<SLint16[]> audio_buffers_[kNumOfOpenSLESBuffers];

   // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
   // in chunks of 10ms. It then allows for this data to be pulled in
diff --git a/sdk/android/src/jni/audio_device/opensles_recorder.cc b/sdk/android/src/jni/audio_device/opensles_recorder.cc
index b3af25bcfb..8962137210 100644
--- a/sdk/android/src/jni/audio_device/opensles_recorder.cc
+++ b/sdk/android/src/jni/audio_device/opensles_recorder.cc
@@ -355,12 +355,12 @@ void OpenSLESRecorder::AllocateDataBuffers() {
   RTC_DCHECK(audio_device_buffer_);
   fine_audio_buffer_.reset(
       new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.GetBytesPerBuffer()));
+                          2 * audio_parameters_.frames_per_buffer()));
   // Allocate queue of audio buffers that stores recorded audio samples.
-  const int data_size_bytes = audio_parameters_.GetBytesPerBuffer();
-  audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]);
+  const int data_size_samples = audio_parameters_.frames_per_buffer();
+  audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint8[data_size_bytes]);
+    audio_buffers_[i].reset(new SLint16[data_size_samples]);
   }
 }
@@ -385,12 +385,12 @@ void OpenSLESRecorder::ReadBufferQueue() {
   // since there is no support to turn off built-in EC in combination with
   // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
   // these estimates) will never be active.
-  const size_t size_in_bytes =
-      static_cast<size_t>(audio_parameters_.GetBytesPerBuffer());
-  const int8_t* data =
-      static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
+  const size_t size_in_samples =
+      static_cast<size_t>(audio_parameters_.frames_per_buffer());
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(data, size_in_bytes), 25);
+      rtc::ArrayView<const int16_t>(audio_buffers_[buffer_index_].get(),
+                                    size_in_samples),
+      25);
   // Enqueue the utilized audio buffer and use if for recording again.
   EnqueueAudioBuffer();
 }
@@ -398,8 +398,10 @@
 bool OpenSLESRecorder::EnqueueAudioBuffer() {
   SLresult err =
       (*simple_buffer_queue_)
-          ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(),
-                    audio_parameters_.GetBytesPerBuffer());
+          ->Enqueue(
+              simple_buffer_queue_,
+              reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get()),
+              audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %s", GetSLErrorString(err));
     return false;
diff --git a/sdk/android/src/jni/audio_device/opensles_recorder.h b/sdk/android/src/jni/audio_device/opensles_recorder.h
index 2f6bd6a03f..cb109c34cd 100644
--- a/sdk/android/src/jni/audio_device/opensles_recorder.h
+++ b/sdk/android/src/jni/audio_device/opensles_recorder.h
@@ -173,9 +173,9 @@ class OpenSLESRecorder : public AudioInput {
   // Queue of audio buffers to be used by the recorder object for capturing
   // audio. They will be used in a Round-robin way and the size of each buffer
-  // is given by AudioParameters::GetBytesPerBuffer(), i.e., it corresponds to
+  // is given by AudioParameters::frames_per_buffer(), i.e., it corresponds to
   // the native OpenSL ES buffer size.
-  std::unique_ptr<std::unique_ptr<SLint8[]>[]> audio_buffers_;
+  std::unique_ptr<std::unique_ptr<SLint16[]>[]> audio_buffers_;

   // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
   // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
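The one place a byte count legitimately survives is the memmove/memcpy arithmetic inside FineAudioBuffer: pointer arithmetic on an int16_t* advances in whole samples, while memmove's length argument counts raw bytes. Below is a standalone sketch of that consume-from-front pattern, again with std::vector<int16_t> standing in for rtc::BufferT<int16_t>; the helper name is illustrative, not from the patch.

```cpp
// Standalone sketch: dropping |num_samples| 16-bit samples from the front of
// a buffer, mirroring the memmove/SetSize pattern in FineAudioBuffer.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

void ConsumeFromFront(std::vector<int16_t>* buffer, size_t num_samples) {
  assert(buffer->size() >= num_samples);
  const size_t remaining = buffer->size() - num_samples;
  // data() + num_samples advances by whole samples; memmove's third argument
  // is a byte count, hence the explicit sizeof(int16_t) scaling.
  std::memmove(buffer->data(), buffer->data() + num_samples,
               remaining * sizeof(int16_t));
  buffer->resize(remaining);
}

int main() {
  std::vector<int16_t> buffer = {1, 2, 3, 4, 5};
  ConsumeFromFront(&buffer, 2);
  assert(buffer.size() == 3 && buffer[0] == 3);  // {3, 4, 5} remain.
  return 0;
}
```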