Use low latency mode on Android O and later.

This CL makes it possible to use a low-latency mode on Android O and later, which should help reduce audio latency. The feature is disabled by default and needs to be enabled when creating the audio device module.

Bug: webrtc:12284
Change-Id: Idf41146aa0bc1206e9a2e28e4101d85c3e4eaefc
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/196741
Reviewed-by: Sami Kalliomäki <sakal@webrtc.org>
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Commit-Queue: Ivo Creusen <ivoc@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32854}
parent 4a541f15dd
commit c25a3a3a1e
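For context, the new mode is opt-in through the JavaAudioDeviceModule builder (see the diff below). A minimal usage sketch follows; the PeerConnectionFactory wiring is ordinary WebRTC Android API shown only for illustration, and applicationContext stands in for your app's Context:

    // Sketch: create an audio device module with low-latency mode enabled.
    JavaAudioDeviceModule adm = JavaAudioDeviceModule.builder(applicationContext)
        .setUseLowLatency(true) // Added by this CL; defaults to false.
        .createAudioDeviceModule();
    // Hand the module to the factory as usual (unchanged by this CL).
    PeerConnectionFactory factory = PeerConnectionFactory.builder()
        .setAudioDeviceModule(adm)
        .createPeerConnectionFactory();

On devices below API level 26 the flag has no effect and the existing AudioTrack code path is used.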
@@ -425,6 +425,7 @@ if (is_android) {
     visibility = [ "*" ]
     sources = [
       "api/org/webrtc/audio/JavaAudioDeviceModule.java",
+      "src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java",
       "src/java/org/webrtc/audio/VolumeLogger.java",
       "src/java/org/webrtc/audio/WebRtcAudioEffects.java",
       "src/java/org/webrtc/audio/WebRtcAudioManager.java",
@@ -1534,12 +1535,14 @@ if (is_android) {
       "tests/src/org/webrtc/IceCandidateTest.java",
       "tests/src/org/webrtc/RefCountDelegateTest.java",
       "tests/src/org/webrtc/ScalingSettingsTest.java",
+      "tests/src/org/webrtc/audio/LowLatencyAudioBufferManagerTest.java",
     ]

     deps = [
       ":base_java",
       ":camera_java",
       ":hwcodecs_java",
+      ":java_audio_device_module_java",
       ":libjingle_peerconnection_java",
       ":peerconnection_java",
       ":video_api_java",
@@ -49,12 +49,14 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
     private boolean useStereoInput;
     private boolean useStereoOutput;
     private AudioAttributes audioAttributes;
+    private boolean useLowLatency;

     private Builder(Context context) {
       this.context = context;
       this.audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
       this.inputSampleRate = WebRtcAudioManager.getSampleRate(audioManager);
       this.outputSampleRate = WebRtcAudioManager.getSampleRate(audioManager);
+      this.useLowLatency = false;
     }

     public Builder setScheduler(ScheduledExecutorService scheduler) {
@@ -195,6 +197,14 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
       return this;
     }

+    /**
+     * Control if the low-latency mode should be used. The default is disabled.
+     */
+    public Builder setUseLowLatency(boolean useLowLatency) {
+      this.useLowLatency = useLowLatency;
+      return this;
+    }
+
     /**
      * Set custom {@link AudioAttributes} to use.
      */
@@ -225,6 +235,12 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
         }
         Logging.d(TAG, "HW AEC will not be used.");
       }
+      // Low-latency mode was introduced in API version 26, see
+      // https://developer.android.com/reference/android/media/AudioTrack#PERFORMANCE_MODE_LOW_LATENCY
+      final int MIN_LOW_LATENCY_SDK_VERSION = 26;
+      if (useLowLatency && Build.VERSION.SDK_INT >= MIN_LOW_LATENCY_SDK_VERSION) {
+        Logging.d(TAG, "Low latency mode will be used.");
+      }
       ScheduledExecutorService executor = this.scheduler;
       if (executor == null) {
         executor = WebRtcAudioRecord.newDefaultScheduler();
@@ -232,8 +248,8 @@ public class JavaAudioDeviceModule implements AudioDeviceModule {
       final WebRtcAudioRecord audioInput = new WebRtcAudioRecord(context, executor, audioManager,
           audioSource, audioFormat, audioRecordErrorCallback, audioRecordStateCallback,
           samplesReadyCallback, useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor);
-      final WebRtcAudioTrack audioOutput = new WebRtcAudioTrack(
-          context, audioManager, audioAttributes, audioTrackErrorCallback, audioTrackStateCallback);
+      final WebRtcAudioTrack audioOutput = new WebRtcAudioTrack(context, audioManager,
+          audioAttributes, audioTrackErrorCallback, audioTrackStateCallback, useLowLatency);
       return new JavaAudioDeviceModule(context, audioManager, audioInput, audioOutput,
           inputSampleRate, outputSampleRate, useStereoInput, useStereoOutput);
     }
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import android.media.AudioTrack;
+import android.os.Build;
+import org.webrtc.Logging;
+
+// Lowers the buffer size if no underruns are detected for 100 ms. Once an
+// underrun is detected, the buffer size is increased by 10 ms and it will not
+// be lowered further. The buffer size will never be increased more than
+// 5 times, to avoid the possibility of the buffer size increasing without
+// bounds.
+class LowLatencyAudioBufferManager {
+  private static final String TAG = "LowLatencyAudioBufferManager";
+  // The underrun count that was valid during the previous call to maybeAdjustBufferSize(). Used to
+  // detect increases in the value.
+  private int prevUnderrunCount;
+  // The number of ticks to wait without an underrun before decreasing the buffer size.
+  private int ticksUntilNextDecrease;
+  // Indicate if we should continue to decrease the buffer size.
+  private boolean keepLoweringBufferSize;
+  // How often the buffer size was increased.
+  private int bufferIncreaseCounter;
+
+  public LowLatencyAudioBufferManager() {
+    this.prevUnderrunCount = 0;
+    this.ticksUntilNextDecrease = 10;
+    this.keepLoweringBufferSize = true;
+    this.bufferIncreaseCounter = 0;
+  }
+
+  public void maybeAdjustBufferSize(AudioTrack audioTrack) {
+    if (Build.VERSION.SDK_INT >= 26) {
+      final int underrunCount = audioTrack.getUnderrunCount();
+      if (underrunCount > prevUnderrunCount) {
+        // Don't increase buffer more than 5 times. Continuing to increase the buffer size
+        // could be harmful on low-power devices that regularly experience underruns under
+        // normal conditions.
+        if (bufferIncreaseCounter < 5) {
+          // Underrun detected, increase buffer size by 10ms.
+          final int currentBufferSize = audioTrack.getBufferSizeInFrames();
+          final int newBufferSize = currentBufferSize + audioTrack.getPlaybackRate() / 100;
+          Logging.d(TAG,
+              "Underrun detected! Increasing AudioTrack buffer size from " + currentBufferSize
+                  + " to " + newBufferSize);
+          audioTrack.setBufferSizeInFrames(newBufferSize);
+          bufferIncreaseCounter++;
+        }
+        // Stop trying to lower the buffer size.
+        keepLoweringBufferSize = false;
+        prevUnderrunCount = underrunCount;
+        ticksUntilNextDecrease = 10;
+      } else if (keepLoweringBufferSize) {
+        ticksUntilNextDecrease--;
+        if (ticksUntilNextDecrease <= 0) {
+          // No underrun seen for 100 ms, try to lower the buffer size by 10ms.
+          final int bufferSize10ms = audioTrack.getPlaybackRate() / 100;
+          // Never go below a buffer size of 10ms.
+          final int currentBufferSize = audioTrack.getBufferSizeInFrames();
+          final int newBufferSize = Math.max(bufferSize10ms, currentBufferSize - bufferSize10ms);
+          if (newBufferSize != currentBufferSize) {
+            Logging.d(TAG,
+                "Lowering AudioTrack buffer size from " + currentBufferSize + " to "
+                    + newBufferSize);
+            audioTrack.setBufferSizeInFrames(newBufferSize);
+          }
+          ticksUntilNextDecrease = 10;
+        }
+      }
+    }
+  }
+}
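To make the adjustment policy concrete: one "tick" is one call to maybeAdjustBufferSize() from the playout loop, and a 10 ms step is derived from the track's playback rate. A small worked sketch mirroring the arithmetic above; the 48000 Hz rate and 1440-frame buffer are assumed example values, not taken from this change:

    // One 10 ms step in frames at an assumed 48 kHz playback rate.
    int playbackRateHz = 48000;                // audioTrack.getPlaybackRate()
    int bufferSize10ms = playbackRateHz / 100; // 480 frames
    int current = 1440;                        // assume a 30 ms buffer
    // Lowering: never shrink below one 10 ms step.
    int lowered = Math.max(bufferSize10ms, current - bufferSize10ms); // 960 frames (20 ms)
    // After an underrun: grow by one 10 ms step (at most 5 times in total).
    int raised = current + bufferSize10ms;     // 1920 frames (40 ms)

Ten ticks without an underrun therefore corresponds to the 100 ms of stable playback mentioned in the class comment, since the playout thread writes audio in 10 ms chunks.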
@@ -19,7 +19,6 @@ import android.media.AudioTrack;
 import android.os.Build;
 import android.os.Process;
 import android.support.annotation.Nullable;
-import java.lang.Thread;
 import java.nio.ByteBuffer;
 import org.webrtc.CalledByNative;
 import org.webrtc.Logging;
@@ -27,6 +26,7 @@ import org.webrtc.ThreadUtils;
 import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackErrorCallback;
 import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStartErrorCode;
 import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStateCallback;
+import org.webrtc.audio.LowLatencyAudioBufferManager;

 class WebRtcAudioTrack {
   private static final String TAG = "WebRtcAudioTrackExternal";
@@ -80,6 +80,8 @@ class WebRtcAudioTrack {
   // Can be used to ensure that the speaker is fully muted.
   private volatile boolean speakerMute;
   private byte[] emptyBytes;
+  private boolean useLowLatency;
+  private int initialBufferSizeInFrames;

   private final @Nullable AudioTrackErrorCallback errorCallback;
   private final @Nullable AudioTrackStateCallback stateCallback;
@@ -92,9 +94,11 @@ class WebRtcAudioTrack {
    */
   private class AudioTrackThread extends Thread {
     private volatile boolean keepAlive = true;
+    private LowLatencyAudioBufferManager bufferManager;

     public AudioTrackThread(String name) {
       super(name);
+      bufferManager = new LowLatencyAudioBufferManager();
     }

     @Override
@@ -134,6 +138,9 @@ class WebRtcAudioTrack {
             reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
           }
         }
+        if (useLowLatency) {
+          bufferManager.maybeAdjustBufferSize(audioTrack);
+        }
         // The byte buffer must be rewinded since byteBuffer.position() is
         // increased at each call to AudioTrack.write(). If we don't do this,
         // next call to AudioTrack.write() will fail.
@@ -164,12 +171,12 @@ class WebRtcAudioTrack {
   @CalledByNative
   WebRtcAudioTrack(Context context, AudioManager audioManager) {
     this(context, audioManager, null /* audioAttributes */, null /* errorCallback */,
-        null /* stateCallback */);
+        null /* stateCallback */, false /* useLowLatency */);
   }

   WebRtcAudioTrack(Context context, AudioManager audioManager,
       @Nullable AudioAttributes audioAttributes, @Nullable AudioTrackErrorCallback errorCallback,
-      @Nullable AudioTrackStateCallback stateCallback) {
+      @Nullable AudioTrackStateCallback stateCallback, boolean useLowLatency) {
     threadChecker.detachThread();
     this.context = context;
     this.audioManager = audioManager;
@@ -177,6 +184,7 @@ class WebRtcAudioTrack {
     this.errorCallback = errorCallback;
     this.stateCallback = stateCallback;
     this.volumeLogger = new VolumeLogger(audioManager);
+    this.useLowLatency = useLowLatency;
     Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
   }

@@ -218,6 +226,13 @@ class WebRtcAudioTrack {
       return -1;
     }

+    // Don't use low-latency mode when a bufferSizeFactor > 1 is used. When bufferSizeFactor > 1
+    // we want to use a larger buffer to prevent underruns. However, low-latency mode would
+    // decrease the buffer size, which makes the bufferSizeFactor have no effect.
+    if (bufferSizeFactor > 1.0) {
+      useLowLatency = false;
+    }
+
     // Ensure that prevision audio session was stopped correctly before trying
     // to create a new AudioTrack.
     if (audioTrack != null) {
@@ -228,7 +243,11 @@ class WebRtcAudioTrack {
     // Create an AudioTrack object and initialize its associated audio buffer.
     // The size of this buffer determines how long an AudioTrack can play
     // before running out of data.
-    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
+    if (useLowLatency && Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
+      // On API level 26 or higher, we can use a low latency mode.
+      audioTrack = createAudioTrackOnOreoOrHigher(
+          sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes);
+    } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
       // If we are on API level 21 or higher, it is possible to use a special AudioTrack
       // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
       // supersede the notion of stream types for defining the behavior of audio playback,
@@ -255,6 +274,11 @@ class WebRtcAudioTrack {
       releaseAudioResources();
       return -1;
     }
+    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
+      initialBufferSizeInFrames = audioTrack.getBufferSizeInFrames();
+    } else {
+      initialBufferSizeInFrames = -1;
+    }
     logMainParameters();
     logMainParametersExtended();
     return minBufferSizeInBytes;
@@ -382,22 +406,16 @@ class WebRtcAudioTrack {
         + "max gain: " + AudioTrack.getMaxVolume());
   }

-  // Creates and AudioTrack instance using AudioAttributes and AudioFormat as input.
-  // It allows certain platforms or routing policies to use this information for more
-  // refined volume or routing decisions.
-  @TargetApi(Build.VERSION_CODES.LOLLIPOP)
-  private static AudioTrack createAudioTrackOnLollipopOrHigher(int sampleRateInHz,
-      int channelConfig, int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
-    Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
-    // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
-    // performance when Android O is supported. Add some logging in the mean time.
+  private static void logNativeOutputSampleRate(int requestedSampleRateInHz) {
     final int nativeOutputSampleRate =
         AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
     Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
-    if (sampleRateInHz != nativeOutputSampleRate) {
+    if (requestedSampleRateInHz != nativeOutputSampleRate) {
       Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
     }
+  }
+
+  private static AudioAttributes getAudioAttributes(@Nullable AudioAttributes overrideAttributes) {
     AudioAttributes.Builder attributesBuilder =
         new AudioAttributes.Builder()
             .setUsage(DEFAULT_USAGE)
@@ -417,9 +435,20 @@ class WebRtcAudioTrack {
         attributesBuilder = applyAttributesOnQOrHigher(attributesBuilder, overrideAttributes);
       }
     }
+    return attributesBuilder.build();
+  }
+
+  // Creates and AudioTrack instance using AudioAttributes and AudioFormat as input.
+  // It allows certain platforms or routing policies to use this information for more
+  // refined volume or routing decisions.
+  @TargetApi(Build.VERSION_CODES.LOLLIPOP)
+  private static AudioTrack createAudioTrackOnLollipopOrHigher(int sampleRateInHz,
+      int channelConfig, int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
+    Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
+    logNativeOutputSampleRate(sampleRateInHz);

     // Create an audio track where the audio usage is for VoIP and the content type is speech.
-    return new AudioTrack(attributesBuilder.build(),
+    return new AudioTrack(getAudioAttributes(overrideAttributes),
         new AudioFormat.Builder()
             .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
             .setSampleRate(sampleRateInHz)
@@ -428,6 +457,32 @@ class WebRtcAudioTrack {
         bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
   }

+  // Creates and AudioTrack instance using AudioAttributes and AudioFormat as input.
+  // Use the low-latency mode to improve audio latency. Note that the low-latency mode may
+  // prevent effects (such as AEC) from working. Assuming AEC is working, the delay changes
+  // that happen in low-latency mode during the call will cause the AEC to perform worse.
+  // The behavior of the low-latency mode may be device dependent, use at your own risk.
+  @TargetApi(Build.VERSION_CODES.O)
+  private static AudioTrack createAudioTrackOnOreoOrHigher(int sampleRateInHz, int channelConfig,
+      int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) {
+    Logging.d(TAG, "createAudioTrackOnOreoOrHigher");
+    logNativeOutputSampleRate(sampleRateInHz);
+
+    // Create an audio track where the audio usage is for VoIP and the content type is speech.
+    return new AudioTrack.Builder()
+        .setAudioAttributes(getAudioAttributes(overrideAttributes))
+        .setAudioFormat(new AudioFormat.Builder()
+                            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
+                            .setSampleRate(sampleRateInHz)
+                            .setChannelMask(channelConfig)
+                            .build())
+        .setBufferSizeInBytes(bufferSizeInBytes)
+        .setPerformanceMode(AudioTrack.PERFORMANCE_MODE_LOW_LATENCY)
+        .setTransferMode(AudioTrack.MODE_STREAM)
+        .setSessionId(AudioManager.AUDIO_SESSION_ID_GENERATE)
+        .build();
+  }
+
   @TargetApi(Build.VERSION_CODES.Q)
   private static AudioAttributes.Builder applyAttributesOnQOrHigher(
       AudioAttributes.Builder builder, AudioAttributes overrideAttributes) {
@@ -458,6 +513,11 @@ class WebRtcAudioTrack {
     return -1;
   }

+  @CalledByNative
+  private int getInitialBufferSizeInFrames() {
+    return initialBufferSizeInFrames;
+  }
+
   private void logBufferCapacityInFrames() {
     if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
       Logging.d(TAG,
@@ -151,6 +151,18 @@ int32_t AudioTrackJni::StopPlayout() {
   if (!initialized_ || !playing_) {
     return 0;
   }
+  // Log the difference in initial and current buffer level.
+  const int current_buffer_size_frames =
+      Java_WebRtcAudioTrack_getBufferSizeInFrames(env_, j_audio_track_);
+  const int initial_buffer_size_frames =
+      Java_WebRtcAudioTrack_getInitialBufferSizeInFrames(env_, j_audio_track_);
+  const int sample_rate_hz = audio_parameters_.sample_rate();
+  RTC_HISTOGRAM_COUNTS(
+      "WebRTC.Audio.AndroidNativeAudioBufferSizeDifferenceFromInitialMs",
+      (current_buffer_size_frames - initial_buffer_size_frames) * 1000 /
+          sample_rate_hz,
+      -500, 100, 100);
+
   if (!Java_WebRtcAudioTrack_stopPlayout(env_, j_audio_track_)) {
     RTC_LOG(LS_ERROR) << "StopPlayout failed";
     return -1;
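The logged sample converts the change in buffer size from frames to milliseconds: (current_buffer_size_frames - initial_buffer_size_frames) * 1000 / sample_rate_hz. As a worked example with assumed values, a buffer that grew from 480 to 960 frames on a 48000 Hz stream yields (960 - 480) * 1000 / 48000 = 10, i.e. 10 ms of growth relative to the initial configuration; the histogram is declared with a -500 to 100 range and 100 buckets.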
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.audio;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.AdditionalMatchers.gt;
+import static org.mockito.AdditionalMatchers.lt;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import android.media.AudioTrack;
+import android.os.Build;
+import org.chromium.testing.local.LocalRobolectricTestRunner;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.robolectric.annotation.Config;
+import org.webrtc.audio.LowLatencyAudioBufferManager;
+
+/**
+ * Tests for LowLatencyAudioBufferManager.
+ */
+@RunWith(LocalRobolectricTestRunner.class)
+@Config(manifest = Config.NONE, sdk = Build.VERSION_CODES.O)
+public class LowLatencyAudioBufferManagerTest {
+  @Mock private AudioTrack mockAudioTrack;
+  private LowLatencyAudioBufferManager bufferManager;
+
+  @Before
+  public void setUp() {
+    MockitoAnnotations.initMocks(this);
+    bufferManager = new LowLatencyAudioBufferManager();
+  }
+
+  @Test
+  public void testBufferSizeDecrease() {
+    when(mockAudioTrack.getUnderrunCount()).thenReturn(0);
+    when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(100);
+    when(mockAudioTrack.getPlaybackRate()).thenReturn(1000);
+    for (int i = 0; i < 9; i++) {
+      bufferManager.maybeAdjustBufferSize(mockAudioTrack);
+    }
+    // Check that the buffer size was not changed yet.
+    verify(mockAudioTrack, times(0)).setBufferSizeInFrames(anyInt());
+    // After the 10th call without underruns, we expect the buffer size to decrease.
+    bufferManager.maybeAdjustBufferSize(mockAudioTrack);
+    // The expected size is 10ms below the existing size, which works out to 100 - (1000 / 100)
+    // = 90.
+    verify(mockAudioTrack, times(1)).setBufferSizeInFrames(90);
+  }
+
+  @Test
+  public void testBufferSizeNeverBelow10ms() {
+    when(mockAudioTrack.getUnderrunCount()).thenReturn(0);
+    when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(11);
+    when(mockAudioTrack.getPlaybackRate()).thenReturn(1000);
+    for (int i = 0; i < 10; i++) {
+      bufferManager.maybeAdjustBufferSize(mockAudioTrack);
+    }
+    // Check that the buffer size was not set to a value below 10 ms.
+    verify(mockAudioTrack, times(0)).setBufferSizeInFrames(lt(10));
+  }
+
+  @Test
+  public void testUnderrunBehavior() {
+    when(mockAudioTrack.getUnderrunCount()).thenReturn(1);
+    when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(100);
+    when(mockAudioTrack.getPlaybackRate()).thenReturn(1000);
+    bufferManager.maybeAdjustBufferSize(mockAudioTrack);
+    // Check that the buffer size was increased after the underrrun.
+    verify(mockAudioTrack, times(1)).setBufferSizeInFrames(gt(100));
+    when(mockAudioTrack.getUnderrunCount()).thenReturn(0);
+    for (int i = 0; i < 10; i++) {
+      bufferManager.maybeAdjustBufferSize(mockAudioTrack);
+    }
+    // Check that the buffer size was not changed again, even though there were no underruns for
+    // 10 calls.
+    verify(mockAudioTrack, times(1)).setBufferSizeInFrames(anyInt());
+  }
+
+  @Test
+  public void testBufferIncrease() {
+    when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(100);
+    when(mockAudioTrack.getPlaybackRate()).thenReturn(1000);
+    for (int i = 1; i < 30; i++) {
+      when(mockAudioTrack.getUnderrunCount()).thenReturn(i);
+      bufferManager.maybeAdjustBufferSize(mockAudioTrack);
+    }
+    // Check that the buffer size was not increased more than 5 times.
+    verify(mockAudioTrack, times(5)).setBufferSizeInFrames(gt(100));
+  }
+}