diff --git a/modules/audio_device/audio_device_buffer.cc b/modules/audio_device/audio_device_buffer.cc index f80319df17..873e5d6088 100644 --- a/modules/audio_device/audio_device_buffer.cc +++ b/modules/audio_device/audio_device_buffer.cc @@ -246,8 +246,17 @@ int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer, RTC_LOG(LS_INFO) << "Size of recording buffer: " << rec_buffer_.size(); } - capture_timestamp_ns_ = capture_timestamp_ns; - + // If the timestamp is less than or equal to zero, it is not valid and is + // ignored. If we do a timestamp alignment on it, it might accidentally + // become greater than zero, and would then be handled as if it were a correct + // timestamp. + capture_timestamp_ns_ = + (capture_timestamp_ns > 0) + ? rtc::kNumNanosecsPerMicrosec * + timestamp_aligner_.TranslateTimestamp( + capture_timestamp_ns / rtc::kNumNanosecsPerMicrosec, + rtc::TimeMicros()) + : capture_timestamp_ns; // Derive a new level value twice per second and check if it is non-zero. int16_t max_abs = 0; RTC_DCHECK_LT(rec_stat_count_, 50); diff --git a/modules/audio_device/audio_device_buffer.h b/modules/audio_device/audio_device_buffer.h index dbb9e5730d..ea6ab9a93e 100644 --- a/modules/audio_device/audio_device_buffer.h +++ b/modules/audio_device/audio_device_buffer.h @@ -23,6 +23,7 @@ #include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" #include "rtc_base/thread_annotations.h" +#include "rtc_base/timestamp_aligner.h" namespace webrtc { @@ -227,6 +228,10 @@ class AudioDeviceBuffer { // being printed in the LogStats() task. bool log_stats_ RTC_GUARDED_BY(task_queue_); + // Used for converting capture timestamps (received from AudioRecordThread + // via AudioRecordJni::DataIsRecorded) to RTC clock. + rtc::TimestampAligner timestamp_aligner_; + // Should *never* be defined in production builds. Only used for testing. // When defined, the output signal will be replaced by a sinus tone at 440Hz. 
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java index 10fd58c36c..6647e5fcbb 100644 --- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java +++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java @@ -17,6 +17,7 @@ import android.media.AudioFormat; import android.media.AudioManager; import android.media.AudioRecord; import android.media.AudioRecordingConfiguration; +import android.media.AudioTimestamp; import android.media.MediaRecorder.AudioSource; import android.os.Build; import android.os.Process; @@ -130,6 +131,10 @@ class WebRtcAudioRecord { doAudioRecordStateCallback(AUDIO_RECORD_START); long lastTime = System.nanoTime(); + AudioTimestamp audioTimestamp = null; + if (Build.VERSION.SDK_INT >= 24) { + audioTimestamp = new AudioTimestamp(); + } while (keepAlive) { int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity()); if (bytesRead == byteBuffer.capacity()) { @@ -141,7 +146,14 @@ class WebRtcAudioRecord { // failed to join this thread. To be a bit safer, try to avoid calling any native methods // in case they've been unregistered after stopRecording() returned. if (keepAlive) { - nativeDataIsRecorded(nativeAudioRecord, bytesRead); + long captureTimeNs = 0; + if (Build.VERSION.SDK_INT >= 24) { + if (audioRecord.getTimestamp(audioTimestamp, AudioTimestamp.TIMEBASE_MONOTONIC) + == AudioRecord.SUCCESS) { + captureTimeNs = audioTimestamp.nanoTime; + } + } + nativeDataIsRecorded(nativeAudioRecord, bytesRead, captureTimeNs); } if (audioSamplesReadyCallback != null) { // Copy the entire byte buffer array. 
The start of the byteBuffer is not necessarily @@ -489,7 +501,8 @@ class WebRtcAudioRecord { private native void nativeCacheDirectBufferAddress( long nativeAudioRecordJni, ByteBuffer byteBuffer); - private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes); + private native void nativeDataIsRecorded( + long nativeAudioRecordJni, int bytes, long captureTimestampNs); // Sets all recorded samples to zero if `mute` is true, i.e., ensures that // the microphone is muted. diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.cc b/sdk/android/src/jni/audio_device/audio_record_jni.cc index 170c81af48..f5f10895eb 100644 --- a/sdk/android/src/jni/audio_device/audio_record_jni.cc +++ b/sdk/android/src/jni/audio_device/audio_record_jni.cc @@ -245,14 +245,15 @@ void AudioRecordJni::CacheDirectBufferAddress( // the thread is 'AudioRecordThread'. void AudioRecordJni::DataIsRecorded(JNIEnv* env, const JavaParamRef<jobject>& j_caller, - int length) { + int length, + int64_t capture_timestamp_ns) { RTC_DCHECK(thread_checker_java_.IsCurrent()); if (!audio_device_buffer_) { RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called"; return; } - audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_, - frames_per_buffer_); + audio_device_buffer_->SetRecordedBuffer( + direct_buffer_address_, frames_per_buffer_, capture_timestamp_ns); // We provide one (combined) fixed delay estimate for the APM and use the // `playDelayMs` parameter only. Components like the AEC only sees the sum // of `playDelayMs` and `recDelayMs`, hence the distributions does not matter. diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.h b/sdk/android/src/jni/audio_device/audio_record_jni.h index 1ff62f8dcf..49c905daaf 100644 --- a/sdk/android/src/jni/audio_device/audio_record_jni.h +++ b/sdk/android/src/jni/audio_device/audio_record_jni.h @@ -90,7 +90,8 @@ class AudioRecordJni : public AudioInput { // the thread is 'AudioRecordThread'. 
void DataIsRecorded(JNIEnv* env, const JavaParamRef<jobject>& j_caller, - int length); + int length, + int64_t capture_timestamp_ns); private: // Stores thread ID in constructor.