Android: Add AudioDeviceModule interface and clean up implementation code
This CL introduces sdk/android/api/org/webrtc/audio/AudioDeviceModule.java, the new interface for audio device modules on Android. It also refactors the main implementation, sdk/android/api/org/webrtc/audio/JavaAudioDeviceModule.java, to conform to the new interface. The old code configured the audio device code through global static methods; this CL removes all of that in favor of a builder pattern on JavaAudioDeviceModule. The only two dynamic methods left in the interface are setSpeakerMute() and setMicrophoneMute(). Removing the global static methods allowed a significant cleanup; for example, the file sdk/android/src/jni/audio_device/audio_manager.cc has been removed entirely.

The PeerConnectionFactory interface is also updated to allow passing in an external AudioDeviceModule. The current built-in ADM is encapsulated in LegacyAudioDeviceModule.java, which remains the default for now to ensure backwards compatibility.

Bug: webrtc:7452
Change-Id: I64d5f4dba9a004da001f1acb2bd0c1b1f2b64f21
Reviewed-on: https://webrtc-review.googlesource.com/65360
Commit-Queue: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Paulina Hensman <phensman@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22765}
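In practice the new configuration surface composes like this (a minimal sketch based only on calls that appear in this diff; appContext stands for any Android application Context):

  // Build a Java-based ADM; all tuning now happens on the builder instead of globals.
  AudioDeviceModule adm = JavaAudioDeviceModule.builder(appContext)
      .setUseHardwareAcousticEchoCanceler(true)
      .setUseHardwareNoiseSuppressor(true)
      .createAudioDeviceModule();

  // Inject it into the factory; omitting setAudioDeviceModule() keeps the legacy default.
  PeerConnectionFactory factory = PeerConnectionFactory.builder()
      .setAudioDeviceModule(adm)
      .createPeerConnectionFactory();

  // The caller owns the module and must free its native resources once audio is no
  // longer needed.
  adm.release();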
Parent: 3ab5c40f72
Commit: 66f1e9eb34
@@ -71,6 +71,10 @@ import org.webrtc.VideoSink;
import org.webrtc.VideoSource;
import org.webrtc.VideoTrack;
import org.webrtc.audio.JavaAudioDeviceModule;
import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordErrorCallback;
import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackErrorCallback;
import org.webrtc.audio.LegacyAudioDeviceModule;
import org.webrtc.audio.AudioDeviceModule;
import org.webrtc.voiceengine.WebRtcAudioManager;
import org.webrtc.voiceengine.WebRtcAudioRecord;
import org.webrtc.voiceengine.WebRtcAudioRecord.AudioRecordStartErrorCode;
@@ -106,8 +110,6 @@ public class PeerConnectionClient {
"WebRTC-H264HighProfile/Enabled/";
private static final String DISABLE_WEBRTC_AGC_FIELDTRIAL =
"WebRTC-Audio-MinimizeResamplingOnMobile/Enabled/";
private static final String EXTERNAL_ANDROID_AUDIO_DEVICE_FIELDTRIAL =
"WebRTC-ExternalAndroidAudioDevice/Enabled/";
private static final String AUDIO_CODEC_PARAM_BITRATE = "maxaveragebitrate";
private static final String AUDIO_ECHO_CANCELLATION_CONSTRAINT = "googEchoCancellation";
private static final String AUDIO_AUTO_GAIN_CONTROL_CONSTRAINT = "googAutoGainControl";
@@ -428,7 +430,6 @@ public class PeerConnectionClient {
Log.d(TAG, "Disable WebRTC AGC field trial.");
}
if (!peerConnectionParameters.useLegacyAudioDevice) {
fieldTrials += EXTERNAL_ANDROID_AUDIO_DEVICE_FIELDTRIAL;
Log.d(TAG, "Enable WebRTC external Android audio device field trial.");
}

@@ -492,11 +493,10 @@ public class PeerConnectionClient {
}
}

if (peerConnectionParameters.useLegacyAudioDevice) {
setupAudioDeviceLegacy();
} else {
setupAudioDevice();
}
final AudioDeviceModule adm = peerConnectionParameters.useLegacyAudioDevice
? createLegacyAudioDevice()
: createJavaAudioDevice();

// Create peer connection factory.
if (options != null) {
Log.d(TAG, "Factory networkIgnoreMask option: " + options.networkIgnoreMask);
@@ -515,11 +515,16 @@ public class PeerConnectionClient {
decoderFactory = new SoftwareVideoDecoderFactory();
}

factory = new PeerConnectionFactory(options, encoderFactory, decoderFactory);
factory = PeerConnectionFactory.builder()
.setOptions(options)
.setAudioDeviceModule(adm)
.setVideoEncoderFactory(encoderFactory)
.setVideoDecoderFactory(decoderFactory)
.createPeerConnectionFactory();
Log.d(TAG, "Peer connection factory created.");
}

void setupAudioDeviceLegacy() {
AudioDeviceModule createLegacyAudioDevice() {
// Enable/disable OpenSL ES playback.
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Disable OpenSL ES audio even if device supports it");
@@ -589,37 +594,19 @@ public class PeerConnectionClient {
reportError(errorMessage);
}
});

return new LegacyAudioDeviceModule();
}

void setupAudioDevice() {
AudioDeviceModule createJavaAudioDevice() {
// Enable/disable OpenSL ES playback.
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Disable OpenSL ES audio even if device supports it");
} else {
Log.d(TAG, "Allow OpenSL ES audio if device supports it");
Log.w(TAG, "External OpenSLES ADM not implemented yet.");
// TODO(magjed): Add support for external OpenSLES ADM.
}

if (peerConnectionParameters.disableBuiltInAEC) {
Log.d(TAG, "Disable built-in AEC even if device supports it");
JavaAudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(true);
} else {
Log.d(TAG, "Enable built-in AEC if device supports it");
JavaAudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
}

if (peerConnectionParameters.disableBuiltInNS) {
Log.d(TAG, "Disable built-in NS even if device supports it");
JavaAudioDeviceModule.setWebRtcBasedNoiseSuppressor(true);
} else {
Log.d(TAG, "Enable built-in NS if device supports it");
JavaAudioDeviceModule.setWebRtcBasedNoiseSuppressor(false);
}

JavaAudioDeviceModule.setOnAudioSamplesReady(saveRecordedAudioToFile);

// Set audio record error callbacks.
JavaAudioDeviceModule.setErrorCallback(new JavaAudioDeviceModule.AudioRecordErrorCallback() {
AudioRecordErrorCallback audioRecordErrorCallback = new AudioRecordErrorCallback() {
@Override
public void onWebRtcAudioRecordInitError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioRecordInitError: " + errorMessage);
@@ -638,9 +625,9 @@ public class PeerConnectionClient {
Log.e(TAG, "onWebRtcAudioRecordError: " + errorMessage);
reportError(errorMessage);
}
});
};

JavaAudioDeviceModule.setErrorCallback(new JavaAudioDeviceModule.AudioTrackErrorCallback() {
AudioTrackErrorCallback audioTrackErrorCallback = new AudioTrackErrorCallback() {
@Override
public void onWebRtcAudioTrackInitError(String errorMessage) {
Log.e(TAG, "onWebRtcAudioTrackInitError: " + errorMessage);
@@ -659,7 +646,16 @@ public class PeerConnectionClient {
Log.e(TAG, "onWebRtcAudioTrackError: " + errorMessage);
reportError(errorMessage);
}
});
};

return JavaAudioDeviceModule.builder(appContext)
.setSamplesReadyCallback(saveRecordedAudioToFile)
.setUseHardwareAcousticEchoCanceler(!peerConnectionParameters.disableBuiltInAEC)
.setUseHardwareNoiseSuppressor(!peerConnectionParameters.disableBuiltInNS)
.setAudioRecordErrorCallback(audioRecordErrorCallback)
.setAudioTrackErrorCallback(audioTrackErrorCallback)
.createAudioDeviceModule();
}

private void createMediaConstraintsInternal() {

@@ -18,7 +18,6 @@ import android.preference.ListPreference;
import android.preference.Preference;
import org.webrtc.Camera2Enumerator;
import org.webrtc.audio.JavaAudioDeviceModule;
import org.webrtc.voiceengine.WebRtcAudioUtils;

/**
* Settings activity for AppRTC.
@@ -174,56 +173,26 @@ public class SettingsActivity extends Activity implements OnSharedPreferenceChan
camera2Preference.setEnabled(false);
}

// Disable forcing WebRTC based AEC so it won't affect our value.
// Otherwise, if it was enabled, isAcousticEchoCancelerSupported would always return false.
if (sharedPreferences.getBoolean(keyprefUseLegacyAudioDevice, false)) {
WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(false);
if (!WebRtcAudioUtils.isAcousticEchoCancelerSupported()) {
Preference disableBuiltInAECPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAEC);
if (!JavaAudioDeviceModule.isBuiltInAcousticEchoCancelerSupported()) {
Preference disableBuiltInAECPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAEC);

disableBuiltInAECPreference.setSummary(getString(R.string.pref_built_in_aec_not_available));
disableBuiltInAECPreference.setEnabled(false);
}
disableBuiltInAECPreference.setSummary(getString(R.string.pref_built_in_aec_not_available));
disableBuiltInAECPreference.setEnabled(false);
}

Preference disableBuiltInAGCPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAGC);
Preference disableBuiltInAGCPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAGC);

disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
disableBuiltInAGCPreference.setEnabled(false);
disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
disableBuiltInAGCPreference.setEnabled(false);

WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(false);
if (!WebRtcAudioUtils.isNoiseSuppressorSupported()) {
Preference disableBuiltInNSPreference =
settingsFragment.findPreference(keyprefDisableBuiltInNS);
if (!JavaAudioDeviceModule.isBuiltInNoiseSuppressorSupported()) {
Preference disableBuiltInNSPreference =
settingsFragment.findPreference(keyprefDisableBuiltInNS);

disableBuiltInNSPreference.setSummary(getString(R.string.pref_built_in_ns_not_available));
disableBuiltInNSPreference.setEnabled(false);
}
} else {
JavaAudioDeviceModule.setWebRtcBasedAcousticEchoCanceler(false);
if (!JavaAudioDeviceModule.isAcousticEchoCancelerSupported()) {
Preference disableBuiltInAECPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAEC);

disableBuiltInAECPreference.setSummary(getString(R.string.pref_built_in_aec_not_available));
disableBuiltInAECPreference.setEnabled(false);
}

Preference disableBuiltInAGCPreference =
settingsFragment.findPreference(keyprefDisableBuiltInAGC);

disableBuiltInAGCPreference.setSummary(getString(R.string.pref_built_in_agc_not_available));
disableBuiltInAGCPreference.setEnabled(false);

JavaAudioDeviceModule.setWebRtcBasedNoiseSuppressor(false);
if (!JavaAudioDeviceModule.isNoiseSuppressorSupported()) {
Preference disableBuiltInNSPreference =
settingsFragment.findPreference(keyprefDisableBuiltInNS);

disableBuiltInNSPreference.setSummary(getString(R.string.pref_built_in_ns_not_available));
disableBuiltInNSPreference.setEnabled(false);
}
disableBuiltInNSPreference.setSummary(getString(R.string.pref_built_in_ns_not_available));
disableBuiltInNSPreference.setEnabled(false);
}
}

@@ -129,7 +129,6 @@ rtc_source_set("native_api_audio_device_module") {
]

deps = [
":audio_device_base_jni",
":base_jni",
":java_audio_device_jni",
":opensles_audio_device_jni",
@@ -156,10 +155,6 @@ rtc_source_set("audio_device_base_jni") {
"src/jni/audio_device/audio_common.h",
"src/jni/audio_device/audio_device_module.cc",
"src/jni/audio_device/audio_device_module.h",
"src/jni/audio_device/audio_manager.cc",
"src/jni/audio_device/audio_manager.h",
"src/jni/audio_device/build_info.cc",
"src/jni/audio_device/build_info.h",
]

deps = [
@@ -167,7 +162,6 @@ rtc_source_set("audio_device_base_jni") {
":generated_audio_device_base_jni",
":native_api_jni",
"../../api:optional",
"../../modules/audio_device:audio_device",
"../../modules/audio_device:audio_device_buffer",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
@@ -234,6 +228,7 @@ rtc_source_set("java_audio_device_jni") {
"src/jni/audio_device/audio_record_jni.h",
"src/jni/audio_device/audio_track_jni.cc",
"src/jni/audio_device/audio_track_jni.h",
"src/jni/audio_device/java_audio_device_module.cc",
]
deps = [
":audio_device_base_jni",
@@ -260,7 +255,6 @@ rtc_static_library("null_audio_jni") {

generate_jni("generated_audio_device_base_jni") {
sources = [
"src/java/org/webrtc/audio/BuildInfo.java",
"src/java/org/webrtc/audio/WebRtcAudioManager.java",
]
jni_package = ""
@@ -269,6 +263,7 @@ generate_jni("generated_audio_device_base_jni") {

generate_jni("generated_java_audio_device_jni") {
sources = [
"api/org/webrtc/audio/JavaAudioDeviceModule.java",
"src/java/org/webrtc/audio/WebRtcAudioRecord.java",
"src/java/org/webrtc/audio/WebRtcAudioTrack.java",
]
@@ -611,7 +606,6 @@ rtc_static_library("peerconnection_jni") {
}

deps = [
":audio_device_base_jni",
":base_jni",
":generated_external_classes_jni",
":generated_peerconnection_jni",
@@ -806,9 +800,10 @@ rtc_android_library("video_api_java") {

rtc_android_library("audio_java") {
java_files = [
"api/org/webrtc/audio/AudioDeviceModule.java",
"api/org/webrtc/audio/JavaAudioDeviceModule.java",
"api/org/webrtc/audio/LegacyAudioDeviceModule.java",
"src/java/org/webrtc/audio/VolumeLogger.java",
"src/java/org/webrtc/audio/BuildInfo.java",
"src/java/org/webrtc/audio/WebRtcAudioEffects.java",
"src/java/org/webrtc/audio/WebRtcAudioManager.java",
"src/java/org/webrtc/audio/WebRtcAudioRecord.java",
@@ -818,6 +813,7 @@ rtc_android_library("audio_java") {

deps = [
":base_java",
"//modules/audio_device:audio_device_java",
"//rtc_base:base_java",
]
}

@@ -13,6 +13,8 @@ package org.webrtc;
import android.content.Context;
import java.util.List;
import javax.annotation.Nullable;
import org.webrtc.audio.AudioDeviceModule;
import org.webrtc.audio.LegacyAudioDeviceModule;

/**
* Java wrapper for a C++ PeerConnectionFactoryInterface. Main entry point to
@@ -132,6 +134,7 @@ public class PeerConnectionFactory {

public static class Builder {
private @Nullable Options options;
private @Nullable AudioDeviceModule audioDeviceModule = new LegacyAudioDeviceModule();
private @Nullable VideoEncoderFactory encoderFactory;
private @Nullable VideoDecoderFactory decoderFactory;
private @Nullable AudioProcessingFactory audioProcessingFactory;
@@ -144,6 +147,11 @@ public class PeerConnectionFactory {
return this;
}

public Builder setAudioDeviceModule(AudioDeviceModule audioDeviceModule) {
this.audioDeviceModule = audioDeviceModule;
return this;
}

public Builder setVideoEncoderFactory(VideoEncoderFactory encoderFactory) {
this.encoderFactory = encoderFactory;
return this;
@@ -170,7 +178,7 @@ public class PeerConnectionFactory {
}

public PeerConnectionFactory createPeerConnectionFactory() {
return new PeerConnectionFactory(options, encoderFactory, decoderFactory,
return new PeerConnectionFactory(options, audioDeviceModule, encoderFactory, decoderFactory,
audioProcessingFactory, fecControllerFactoryFactory);
}
}
@@ -255,8 +263,8 @@ public class PeerConnectionFactory {
public PeerConnectionFactory(
Options options, VideoEncoderFactory encoderFactory, VideoDecoderFactory decoderFactory) {
checkInitializeHasBeenCalled();
nativeFactory = nativeCreatePeerConnectionFactory(
ContextUtils.getApplicationContext(), options, encoderFactory, decoderFactory, 0, 0);
nativeFactory = nativeCreatePeerConnectionFactory(ContextUtils.getApplicationContext(), options,
0 /* audioDeviceModule */, encoderFactory, decoderFactory, 0, 0);
if (nativeFactory == 0) {
throw new RuntimeException("Failed to initialize PeerConnectionFactory!");
}
@@ -265,16 +273,17 @@ public class PeerConnectionFactory {
@Deprecated
public PeerConnectionFactory(Options options, VideoEncoderFactory encoderFactory,
VideoDecoderFactory decoderFactory, AudioProcessingFactory audioProcessingFactory) {
this(options, encoderFactory, decoderFactory, audioProcessingFactory,
null /* fecControllerFactoryFactory */);
this(options, new LegacyAudioDeviceModule(), encoderFactory, decoderFactory,
audioProcessingFactory, null /* fecControllerFactoryFactory */);
}

private PeerConnectionFactory(Options options, @Nullable VideoEncoderFactory encoderFactory,
@Nullable VideoDecoderFactory decoderFactory,
private PeerConnectionFactory(Options options, @Nullable AudioDeviceModule audioDeviceModule,
@Nullable VideoEncoderFactory encoderFactory, @Nullable VideoDecoderFactory decoderFactory,
@Nullable AudioProcessingFactory audioProcessingFactory,
@Nullable FecControllerFactoryFactoryInterface fecControllerFactoryFactory) {
checkInitializeHasBeenCalled();
nativeFactory = nativeCreatePeerConnectionFactory(ContextUtils.getApplicationContext(), options,
audioDeviceModule == null ? 0 : audioDeviceModule.getNativeAudioDeviceModulePointer(),
encoderFactory, decoderFactory,
audioProcessingFactory == null ? 0 : audioProcessingFactory.createNative(),
fecControllerFactoryFactory == null ? 0 : fecControllerFactoryFactory.createNative());
@@ -483,8 +492,9 @@ public class PeerConnectionFactory {
private static native boolean nativeStartInternalTracingCapture(String tracingFilename);
private static native void nativeStopInternalTracingCapture();
private static native long nativeCreatePeerConnectionFactory(Context context, Options options,
VideoEncoderFactory encoderFactory, VideoDecoderFactory decoderFactory,
long nativeAudioProcessor, long nativeFecControllerFactory);
long nativeAudioDeviceModule, VideoEncoderFactory encoderFactory,
VideoDecoderFactory decoderFactory, long nativeAudioProcessor,
long nativeFecControllerFactory);
private static native long nativeCreatePeerConnection(long factory,
PeerConnection.RTCConfiguration rtcConfig, MediaConstraints constraints, long nativeObserver);
private static native long nativeCreateLocalMediaStream(long factory, String label);

sdk/android/api/org/webrtc/audio/AudioDeviceModule.java (new file, 38 lines)
@@ -0,0 +1,38 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

package org.webrtc.audio;

/**
* This interface is a thin wrapper on top of a native C++ webrtc::AudioDeviceModule (ADM). The
* reason for basing it on a native ADM instead of a pure Java interface is that we have two native
* Android implementations (OpenSLES and AAudio) that do not make sense to wrap through JNI.
*
* <p>Note: This class is still under development and may change without notice.
*/
public interface AudioDeviceModule {
/**
* Returns a C++ pointer to a webrtc::AudioDeviceModule. Caller does _not_ take ownership and
* lifetime is handled through the release() call.
*/
long getNativeAudioDeviceModulePointer();

/**
* Release resources for this AudioDeviceModule, including native resources. The object should not
* be used after this call.
*/
void release();

/** Control muting/unmuting the speaker. */
void setSpeakerMute(boolean mute);

/** Control muting/unmuting the microphone. */
void setMicrophoneMute(boolean mute);
}
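Taken together, the contract is: query the native pointer once at factory-creation time, toggle the mutes dynamically, and release exactly once. A short usage sketch (hedged; the builder call assumes the JavaAudioDeviceModule defined later in this CL, and appContext is illustrative):

  AudioDeviceModule adm = JavaAudioDeviceModule.builder(appContext).createAudioDeviceModule();
  adm.setMicrophoneMute(true);  // stop sending captured audio
  adm.setSpeakerMute(false);    // keep remote playout audible
  adm.release();                // frees the native reference; do not use adm afterwards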
@@ -10,21 +10,143 @@

package org.webrtc.audio;

import org.webrtc.audio.WebRtcAudioManager;
import org.webrtc.audio.WebRtcAudioRecord;
import org.webrtc.audio.WebRtcAudioTrack;
import org.webrtc.audio.WebRtcAudioUtils;
import android.media.AudioManager;
import android.content.Context;
import org.webrtc.JNINamespace;
import org.webrtc.JniCommon;
import org.webrtc.Logging;

/**
* AudioDeviceModule implemented using android.media.AudioRecord as input and
* android.media.AudioTrack as output.
*
* <p>Note: This class is still under development and may change without notice.
*/
public class JavaAudioDeviceModule {
/* AudioManager */
public static void setStereoInput(boolean enable) {
WebRtcAudioManager.setStereoInput(enable);
@JNINamespace("webrtc::jni")
public class JavaAudioDeviceModule implements AudioDeviceModule {
private static final String TAG = "JavaAudioDeviceModule";

public static Builder builder(Context context) {
return new Builder(context);
}

public static class Builder {
private final Context context;
private final AudioManager audioManager;
private int sampleRate;
private int audioSource = WebRtcAudioRecord.DEFAULT_AUDIO_SOURCE;
private AudioTrackErrorCallback audioTrackErrorCallback;
private AudioRecordErrorCallback audioRecordErrorCallback;
private SamplesReadyCallback samplesReadyCallback;
private boolean useHardwareAcousticEchoCanceler = isBuiltInAcousticEchoCancelerSupported();
private boolean useHardwareNoiseSuppressor = isBuiltInNoiseSuppressorSupported();
private boolean useStereoInput;
private boolean useStereoOutput;

private Builder(Context context) {
this.context = context;
this.audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
this.sampleRate = WebRtcAudioManager.getSampleRate(audioManager);
}

/**
* Call this method if the default handling of querying the native sample rate shall be
* overridden. Can be useful on some devices where the available Android APIs are known to
* return invalid results.
*/
public Builder setSampleRate(int sampleRate) {
this.sampleRate = sampleRate;
return this;
}

/**
* Call this to change the audio source. The argument should be one of the values from
* android.media.MediaRecorder.AudioSource. The default is AudioSource.VOICE_COMMUNICATION.
*/
public Builder setAudioSource(int audioSource) {
this.audioSource = audioSource;
return this;
}

/**
* Set a callback to retrieve errors from the AudioTrack.
*/
public Builder setAudioTrackErrorCallback(AudioTrackErrorCallback audioTrackErrorCallback) {
this.audioTrackErrorCallback = audioTrackErrorCallback;
return this;
}

/**
* Set a callback to retrieve errors from the AudioRecord.
*/
public Builder setAudioRecordErrorCallback(AudioRecordErrorCallback audioRecordErrorCallback) {
this.audioRecordErrorCallback = audioRecordErrorCallback;
return this;
}

/**
* Set a callback to listen to the raw audio input from the AudioRecord.
*/
public Builder setSamplesReadyCallback(SamplesReadyCallback samplesReadyCallback) {
this.samplesReadyCallback = samplesReadyCallback;
return this;
}

/**
* Control if the built-in HW noise suppressor should be used or not. The default is on if it is
* supported. It is possible to query support by calling isBuiltInNoiseSuppressorSupported().
*/
public Builder setUseHardwareNoiseSuppressor(boolean useHardwareNoiseSuppressor) {
if (useHardwareNoiseSuppressor && !isBuiltInNoiseSuppressorSupported()) {
Logging.e(TAG, "HW noise suppressor not supported");
useHardwareNoiseSuppressor = false;
}
this.useHardwareNoiseSuppressor = useHardwareNoiseSuppressor;
return this;
}

/**
* Control if the built-in HW acoustic echo canceler should be used or not. The default is on if
* it is supported. It is possible to query support by calling
* isBuiltInAcousticEchoCancelerSupported().
*/
public Builder setUseHardwareAcousticEchoCanceler(boolean useHardwareAcousticEchoCanceler) {
if (useHardwareAcousticEchoCanceler && !isBuiltInAcousticEchoCancelerSupported()) {
Logging.e(TAG, "HW acoustic echo canceler not supported");
useHardwareAcousticEchoCanceler = false;
}
this.useHardwareAcousticEchoCanceler = useHardwareAcousticEchoCanceler;
return this;
}

/**
* Control if stereo input should be used or not. The default is mono.
*/
public Builder setUseStereoInput(boolean useStereoInput) {
this.useStereoInput = useStereoInput;
return this;
}

/**
* Control if stereo output should be used or not. The default is mono.
*/
public Builder setUseStereoOutput(boolean useStereoOutput) {
this.useStereoOutput = useStereoOutput;
return this;
}

/**
* Construct an AudioDeviceModule based on the supplied arguments. The caller takes ownership
* and is responsible for calling release().
*/
public AudioDeviceModule createAudioDeviceModule() {
final WebRtcAudioRecord audioInput =
new WebRtcAudioRecord(context, audioManager, audioSource, audioRecordErrorCallback,
samplesReadyCallback, useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor);
final WebRtcAudioTrack audioOutput =
new WebRtcAudioTrack(context, audioManager, audioTrackErrorCallback);
final long nativeAudioDeviceModule = nativeCreateAudioDeviceModule(context, audioManager,
audioInput, audioOutput, sampleRate, useStereoInput, useStereoOutput);
return new JavaAudioDeviceModule(audioInput, audioOutput, nativeAudioDeviceModule);
}
}

/* AudioRecord */
@@ -82,14 +204,6 @@ public class JavaAudioDeviceModule {
void onWebRtcAudioRecordSamplesReady(AudioSamples samples);
}

public static void setErrorCallback(AudioRecordErrorCallback errorCallback) {
WebRtcAudioRecord.setErrorCallback(errorCallback);
}

public static void setOnAudioSamplesReady(SamplesReadyCallback callback) {
WebRtcAudioRecord.setOnAudioSamplesReady(callback);
}

/* AudioTrack */
// Audio playout/track error handler functions.
public enum AudioTrackStartErrorCode {
@@ -103,37 +217,57 @@ public class JavaAudioDeviceModule {
void onWebRtcAudioTrackError(String errorMessage);
}

public static void setErrorCallback(AudioTrackErrorCallback errorCallback) {
WebRtcAudioTrack.setErrorCallback(errorCallback);
/**
* Returns true if the device supports built-in HW AEC, and the UUID is approved (some UUIDs can
* be excluded).
*/
public static boolean isBuiltInAcousticEchoCancelerSupported() {
return WebRtcAudioEffects.isAcousticEchoCancelerSupported();
}

/* AudioUtils */
public static void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(enable);
/**
* Returns true if the device supports built-in HW NS, and the UUID is approved (some UUIDs can be
* excluded).
*/
public static boolean isBuiltInNoiseSuppressorSupported() {
return WebRtcAudioEffects.isNoiseSuppressorSupported();
}

public static void setWebRtcBasedNoiseSuppressor(boolean enable) {
WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(enable);
private final WebRtcAudioRecord audioInput;
private final WebRtcAudioTrack audioOutput;
private long nativeAudioDeviceModule;

private JavaAudioDeviceModule(
WebRtcAudioRecord audioInput, WebRtcAudioTrack audioOutput, long nativeAudioDeviceModule) {
this.audioInput = audioInput;
this.audioOutput = audioOutput;
this.nativeAudioDeviceModule = nativeAudioDeviceModule;
}

// Returns true if the device supports an audio effect (AEC or NS).
// Four conditions must be fulfilled if functions are to return true:
// 1) the platform must support the built-in (HW) effect,
// 2) explicit use (override) of a WebRTC based version must not be set,
// 3) the device must not be blacklisted for use of the effect, and
// 4) the UUID of the effect must be approved (some UUIDs can be excluded).
public static boolean isAcousticEchoCancelerSupported() {
return WebRtcAudioEffects.canUseAcousticEchoCanceler();
}
public static boolean isNoiseSuppressorSupported() {
return WebRtcAudioEffects.canUseNoiseSuppressor();
@Override
public long getNativeAudioDeviceModulePointer() {
return nativeAudioDeviceModule;
}

// Call this method if the default handling of querying the native sample
// rate shall be overridden. Can be useful on some devices where the
// available Android APIs are known to return invalid results.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
public static void setDefaultSampleRateHz(int sampleRateHz) {
WebRtcAudioUtils.setDefaultSampleRateHz(sampleRateHz);
@Override
public void release() {
if (nativeAudioDeviceModule != 0) {
JniCommon.nativeReleaseRef(nativeAudioDeviceModule);
nativeAudioDeviceModule = 0;
}
}

@Override
public void setSpeakerMute(boolean mute) {
audioOutput.setSpeakerMute(mute);
}

@Override
public void setMicrophoneMute(boolean mute) {
audioInput.setMicrophoneMute(mute);
}

private static native long nativeCreateAudioDeviceModule(Context context,
AudioManager audioManager, WebRtcAudioRecord audioInput, WebRtcAudioTrack audioOutput,
int sampleRate, boolean useStereoInput, boolean useStereoOutput);
}

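The Builder above replaces the removed static configuration points (setStereoInput(), setDefaultSampleRateHz(), the setWebRtcBased* toggles) with per-instance options. A hedged sketch of a fully configured module; the concrete values are illustrative only:

  AudioDeviceModule adm = JavaAudioDeviceModule.builder(context)
      .setSampleRate(48000)             // overrides the rate queried from AudioManager
      .setUseStereoInput(true)          // default is mono
      .setUseStereoOutput(true)         // default is mono
      .setUseHardwareAcousticEchoCanceler(false)  // fall back to the WebRTC software AEC
      .setUseHardwareNoiseSuppressor(false)       // fall back to the WebRTC software NS
      .createAudioDeviceModule();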
@@ -0,0 +1,50 @@
/*
* Copyright 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

package org.webrtc.audio;

import org.webrtc.voiceengine.WebRtcAudioRecord;
import org.webrtc.voiceengine.WebRtcAudioTrack;

/**
* This class represents the legacy AudioDeviceModule that is currently hardcoded into C++ WebRTC.
* It will return a null native AudioDeviceModule pointer, leading to an internal object being
* created inside WebRTC that is controlled by static calls to the classes under the voiceengine
* package. Please use the new JavaAudioDeviceModule instead of this class.
*/
@Deprecated
public class LegacyAudioDeviceModule implements AudioDeviceModule {
public static AudioDeviceModule Create() {
return new LegacyAudioDeviceModule();
}

@Override
public long getNativeAudioDeviceModulePointer() {
// Returning a null pointer will make WebRTC construct the built-in legacy AudioDeviceModule for
// Android internally.
return 0;
}

@Override
public void release() {
// All control for this ADM goes through static global methods and the C++ object is owned
// internally by WebRTC.
}

@Override
public void setSpeakerMute(boolean mute) {
WebRtcAudioTrack.setSpeakerMute(mute);
}

@Override
public void setMicrophoneMute(boolean mute) {
WebRtcAudioRecord.setMicrophoneMute(mute);
}
}
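Because PeerConnectionFactory.Builder initializes its audioDeviceModule field to a new LegacyAudioDeviceModule, applications that never call setAudioDeviceModule() keep the old behavior: the null (0) native pointer above is what makes WebRTC construct its internal C++ ADM. A sketch of that default path:

  // No setAudioDeviceModule() call: the legacy ADM and its static voiceengine
  // configuration remain in effect, preserving backwards compatibility.
  PeerConnectionFactory factory =
      PeerConnectionFactory.builder().createPeerConnectionFactory();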
@@ -19,7 +19,7 @@
#include "rtc_base/refcountedobject.h"
#include "sdk/android/src/jni/audio_device/aaudio_player.h"
#include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"

#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
#include "sdk/android/src/jni/audio_device/opensles_player.h"
@@ -28,72 +28,117 @@

namespace webrtc {

namespace {

void GetDefaultAudioParameters(JNIEnv* env,
jobject application_context,
AudioParameters* input_parameters,
AudioParameters* output_parameters) {
const JavaParamRef<jobject> j_context(application_context);
const ScopedJavaLocalRef<jobject> j_audio_manager =
android_adm::GetAudioManager(env, j_context);
const int sample_rate =
android_adm::GetDefaultSampleRate(env, j_audio_manager);
android_adm::GetAudioParameters(env, j_context, j_audio_manager, sample_rate,
false /* use_stereo_input */,
false /* use_stereo_output */,
input_parameters, output_parameters);
}

}  // namespace

#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
rtc::scoped_refptr<AudioDeviceModule> CreateAAudioAudioDeviceModule(
JNIEnv* env,
jobject application_context) {
RTC_LOG(INFO) << __FUNCTION__;
const AudioDeviceModule::AudioLayer audio_layer =
AudioDeviceModule::kAndroidAAudioAudio;
auto audio_manager = rtc::MakeUnique<android_adm::AudioManager>(
env, audio_layer, JavaParamRef<jobject>(application_context));
auto audio_input =
rtc::MakeUnique<android_adm::AAudioRecorder>(audio_manager.get());
auto audio_output =
rtc::MakeUnique<android_adm::AAudioPlayer>(audio_manager.get());
// Get default audio input/output parameters.
AudioParameters input_parameters;
AudioParameters output_parameters;
GetDefaultAudioParameters(env, application_context, &input_parameters,
&output_parameters);
// Create ADM from AAudioRecorder and AAudioPlayer.
return CreateAudioDeviceModuleFromInputAndOutput(
audio_layer, std::move(audio_manager), std::move(audio_input),
std::move(audio_output));
AudioDeviceModule::kAndroidAAudioAudio, false /* use_stereo_input */,
false /* use_stereo_output */,
android_adm::kLowLatencyModeDelayEstimateInMilliseconds,
rtc::MakeUnique<android_adm::AAudioRecorder>(input_parameters),
rtc::MakeUnique<android_adm::AAudioPlayer>(output_parameters));
}
#endif

rtc::scoped_refptr<AudioDeviceModule> CreateJavaAudioDeviceModule(
JNIEnv* env,
jobject application_context) {
const AudioDeviceModule::AudioLayer audio_layer =
AudioDeviceModule::kAndroidJavaAudio;
auto audio_manager = rtc::MakeUnique<android_adm::AudioManager>(
env, audio_layer, JavaParamRef<jobject>(application_context));
auto audio_input =
rtc::MakeUnique<android_adm::AudioRecordJni>(audio_manager.get());
auto audio_output =
rtc::MakeUnique<android_adm::AudioTrackJni>(audio_manager.get());
// Get default audio input/output parameters.
const JavaParamRef<jobject> j_context(application_context);
const ScopedJavaLocalRef<jobject> j_audio_manager =
android_adm::GetAudioManager(env, j_context);
AudioParameters input_parameters;
AudioParameters output_parameters;
GetDefaultAudioParameters(env, application_context, &input_parameters,
&output_parameters);
// Create ADM from AudioRecord and AudioTrack.
auto audio_input = rtc::MakeUnique<android_adm::AudioRecordJni>(
env, input_parameters,
android_adm::kHighLatencyModeDelayEstimateInMilliseconds,
android_adm::AudioRecordJni::CreateJavaWebRtcAudioRecord(
env, j_context, j_audio_manager));
auto audio_output = rtc::MakeUnique<android_adm::AudioTrackJni>(
env, output_parameters,
android_adm::AudioTrackJni::CreateJavaWebRtcAudioTrack(env, j_context,
j_audio_manager));
return CreateAudioDeviceModuleFromInputAndOutput(
audio_layer, std::move(audio_manager), std::move(audio_input),
std::move(audio_output));
AudioDeviceModule::kAndroidJavaAudio, false /* use_stereo_input */,
false /* use_stereo_output */,
android_adm::kHighLatencyModeDelayEstimateInMilliseconds,
std::move(audio_input), std::move(audio_output));
}

rtc::scoped_refptr<AudioDeviceModule> CreateOpenSLESAudioDeviceModule(
JNIEnv* env,
jobject application_context) {
const AudioDeviceModule::AudioLayer audio_layer =
AudioDeviceModule::kAndroidOpenSLESAudio;
// Get default audio input/output parameters.
AudioParameters input_parameters;
AudioParameters output_parameters;
GetDefaultAudioParameters(env, application_context, &input_parameters,
&output_parameters);
// Create ADM from OpenSLESRecorder and OpenSLESPlayer.
auto engine_manager = rtc::MakeUnique<android_adm::OpenSLEngineManager>();
auto audio_manager = rtc::MakeUnique<android_adm::AudioManager>(
env, audio_layer, JavaParamRef<jobject>(application_context));
auto audio_input = rtc::MakeUnique<android_adm::OpenSLESRecorder>(
audio_manager.get(), engine_manager.get());
input_parameters, engine_manager.get());
auto audio_output = rtc::MakeUnique<android_adm::OpenSLESPlayer>(
audio_manager.get(), std::move(engine_manager));
output_parameters, std::move(engine_manager));
return CreateAudioDeviceModuleFromInputAndOutput(
audio_layer, std::move(audio_manager), std::move(audio_input),
std::move(audio_output));
AudioDeviceModule::kAndroidOpenSLESAudio, false /* use_stereo_input */,
false /* use_stereo_output */,
android_adm::kLowLatencyModeDelayEstimateInMilliseconds,
std::move(audio_input), std::move(audio_output));
}

rtc::scoped_refptr<AudioDeviceModule>
CreateJavaInputAndOpenSLESOutputAudioDeviceModule(JNIEnv* env,
jobject application_context) {
const AudioDeviceModule::AudioLayer audio_layer =
AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
auto audio_manager = rtc::MakeUnique<android_adm::AudioManager>(
env, audio_layer, JavaParamRef<jobject>(application_context));
auto audio_input =
rtc::MakeUnique<android_adm::AudioRecordJni>(audio_manager.get());
// Get default audio input/output parameters.
const JavaParamRef<jobject> j_context(application_context);
const ScopedJavaLocalRef<jobject> j_audio_manager =
android_adm::GetAudioManager(env, j_context);
AudioParameters input_parameters;
AudioParameters output_parameters;
GetDefaultAudioParameters(env, application_context, &input_parameters,
&output_parameters);
// Create ADM from AudioRecord and OpenSLESPlayer.
auto audio_input = rtc::MakeUnique<android_adm::AudioRecordJni>(
env, input_parameters,
android_adm::kLowLatencyModeDelayEstimateInMilliseconds,
android_adm::AudioRecordJni::CreateJavaWebRtcAudioRecord(
env, j_context, j_audio_manager));
auto audio_output = rtc::MakeUnique<android_adm::OpenSLESPlayer>(
audio_manager.get(), rtc::MakeUnique<android_adm::OpenSLEngineManager>());
output_parameters, rtc::MakeUnique<android_adm::OpenSLEngineManager>());
return CreateAudioDeviceModuleFromInputAndOutput(
audio_layer, std::move(audio_manager), std::move(audio_input),
std::move(audio_output));
AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio,
false /* use_stereo_input */, false /* use_stereo_output */,
android_adm::kLowLatencyModeDelayEstimateInMilliseconds,
std::move(audio_input), std::move(audio_output));
}

}  // namespace webrtc

@@ -14,10 +14,10 @@ import java.nio.ByteBuffer;

/** Class with static JNI helper functions that are used in many places. */
@JNINamespace("webrtc::jni")
class JniCommon {
public class JniCommon {
/** Functions to increment/decrement an rtc::RefCountInterface pointer. */
static native void nativeAddRef(long refCountedPointer);
static native void nativeReleaseRef(long refCountedPointer);
public static native void nativeAddRef(long refCountedPointer);
public static native void nativeReleaseRef(long refCountedPointer);

public static native ByteBuffer nativeAllocateByteBuffer(int size);
public static native void nativeFreeByteBuffer(ByteBuffer buffer);

@@ -1,59 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/

package org.webrtc.audio;

import android.os.Build;
import org.webrtc.CalledByNative;

public final class BuildInfo {
public static String getDevice() {
return Build.DEVICE;
}

@CalledByNative
public static String getDeviceModel() {
return Build.MODEL;
}

public static String getProduct() {
return Build.PRODUCT;
}

@CalledByNative
public static String getBrand() {
return Build.BRAND;
}

@CalledByNative
public static String getDeviceManufacturer() {
return Build.MANUFACTURER;
}

@CalledByNative
public static String getAndroidBuildId() {
return Build.ID;
}

@CalledByNative
public static String getBuildType() {
return Build.TYPE;
}

@CalledByNative
public static String getBuildRelease() {
return Build.VERSION.RELEASE;
}

@CalledByNative
public static int getSdkVersion() {
return Build.VERSION.SDK_INT;
}
}
@@ -29,7 +29,7 @@ import org.webrtc.Logging;
class WebRtcAudioEffects {
private static final boolean DEBUG = false;

private static final String TAG = "WebRtcAudioEffects";
private static final String TAG = "WebRtcAudioEffectsExternal";

// UUIDs for Software Audio Effects that we want to avoid using.
// The implementor field will be set to "The Android Open Source Project".
@@ -56,106 +56,20 @@ class WebRtcAudioEffects {
private boolean shouldEnableAec = false;
private boolean shouldEnableNs = false;

// Checks if the device implements Acoustic Echo Cancellation (AEC).
// Returns true if the device implements AEC, false otherwise.
// Returns true if all conditions for supporting HW Acoustic Echo Cancellation (AEC) are
// fulfilled.
@TargetApi(18)
public static boolean isAcousticEchoCancelerSupported() {
// Note: we're using isAcousticEchoCancelerEffectAvailable() instead of
// AcousticEchoCanceler.isAvailable() to avoid the expensive getEffects()
// OS API call.
return isAcousticEchoCancelerEffectAvailable();
return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC, AOSP_ACOUSTIC_ECHO_CANCELER);
}

// Checks if the device implements Noise Suppression (NS).
// Returns true if the device implements NS, false otherwise.
// Returns true if all conditions for supporting HW Noise Suppression (NS) are fulfilled.
@TargetApi(18)
public static boolean isNoiseSuppressorSupported() {
// Note: we're using isNoiseSuppressorEffectAvailable() instead of
// NoiseSuppressor.isAvailable() to avoid the expensive getEffects()
// OS API call.
return isNoiseSuppressorEffectAvailable();
return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS, AOSP_NOISE_SUPPRESSOR);
}

// Returns true if the device is blacklisted for HW AEC usage.
public static boolean isAcousticEchoCancelerBlacklisted() {
List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForAecUsage();
boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
if (isBlacklisted) {
Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
}
return isBlacklisted;
}

// Returns true if the device is blacklisted for HW NS usage.
public static boolean isNoiseSuppressorBlacklisted() {
List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForNsUsage();
boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
if (isBlacklisted) {
Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
}
return isBlacklisted;
}

// Returns true if the platform AEC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
@TargetApi(18)
private static boolean isAcousticEchoCancelerExcludedByUUID() {
for (Descriptor d : getAvailableEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC)
&& d.uuid.equals(AOSP_ACOUSTIC_ECHO_CANCELER)) {
return true;
}
}
return false;
}

// Returns true if the platform NS should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
@TargetApi(18)
private static boolean isNoiseSuppressorExcludedByUUID() {
for (Descriptor d : getAvailableEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) && d.uuid.equals(AOSP_NOISE_SUPPRESSOR)) {
return true;
}
}
return false;
}

// Returns true if the device supports Acoustic Echo Cancellation (AEC).
@TargetApi(18)
private static boolean isAcousticEchoCancelerEffectAvailable() {
return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC);
}

// Returns true if the device supports Noise Suppression (NS).
@TargetApi(18)
private static boolean isNoiseSuppressorEffectAvailable() {
return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS);
}

// Returns true if all conditions for supporting the HW AEC are fulfilled.
// It will not be possible to enable the HW AEC if this method returns false.
public static boolean canUseAcousticEchoCanceler() {
boolean canUseAcousticEchoCanceler = isAcousticEchoCancelerSupported()
&& !WebRtcAudioUtils.useWebRtcBasedAcousticEchoCanceler()
&& !isAcousticEchoCancelerBlacklisted() && !isAcousticEchoCancelerExcludedByUUID();
Logging.d(TAG, "canUseAcousticEchoCanceler: " + canUseAcousticEchoCanceler);
return canUseAcousticEchoCanceler;
}

// Returns true if all conditions for supporting the HW NS are fulfilled.
// It will not be possible to enable the HW NS if this method returns false.
public static boolean canUseNoiseSuppressor() {
boolean canUseNoiseSuppressor = isNoiseSuppressorSupported()
&& !WebRtcAudioUtils.useWebRtcBasedNoiseSuppressor() && !isNoiseSuppressorBlacklisted()
&& !isNoiseSuppressorExcludedByUUID();
Logging.d(TAG, "canUseNoiseSuppressor: " + canUseNoiseSuppressor);
return canUseNoiseSuppressor;
}

public static WebRtcAudioEffects create() {
return new WebRtcAudioEffects();
}

private WebRtcAudioEffects() {
public WebRtcAudioEffects() {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
}

@@ -165,7 +79,7 @@ class WebRtcAudioEffects {
// false otherwise.
public boolean setAEC(boolean enable) {
Logging.d(TAG, "setAEC(" + enable + ")");
if (!canUseAcousticEchoCanceler()) {
if (!isAcousticEchoCancelerSupported()) {
Logging.w(TAG, "Platform AEC is not supported");
shouldEnableAec = false;
return false;
@@ -184,7 +98,7 @@ class WebRtcAudioEffects {
// false otherwise.
public boolean setNS(boolean enable) {
Logging.d(TAG, "setNS(" + enable + ")");
if (!canUseNoiseSuppressor()) {
if (!isNoiseSuppressorSupported()) {
Logging.w(TAG, "Platform NS is not supported");
shouldEnableNs = false;
return false;
@@ -223,7 +137,7 @@ class WebRtcAudioEffects {
aec = AcousticEchoCanceler.create(audioSession);
if (aec != null) {
boolean enabled = aec.getEnabled();
boolean enable = shouldEnableAec && canUseAcousticEchoCanceler();
boolean enable = shouldEnableAec && isAcousticEchoCancelerSupported();
if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
}
@@ -241,7 +155,7 @@ class WebRtcAudioEffects {
ns = NoiseSuppressor.create(audioSession);
if (ns != null) {
boolean enabled = ns.getEnabled();
boolean enable = shouldEnableNs && canUseNoiseSuppressor();
boolean enable = shouldEnableNs && isNoiseSuppressorSupported();
if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
Logging.e(TAG, "Failed to set the NoiseSuppressor state");
}
@@ -309,14 +223,15 @@ class WebRtcAudioEffects {
// Returns true if an effect of the specified type is available. Functionally
// equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
// faster as it avoids the expensive OS call to enumerate effects.
private static boolean isEffectTypeAvailable(UUID effectType) {
@TargetApi(18)
private static boolean isEffectTypeAvailable(UUID effectType, UUID blackListedUuid) {
Descriptor[] effects = getAvailableEffects();
if (effects == null) {
return false;
}
for (Descriptor d : effects) {
if (d.type.equals(effectType)) {
return true;
return !d.uuid.equals(blackListedUuid);
}
}
return false;

@ -17,58 +17,16 @@ import android.media.AudioFormat;
|
||||
import android.media.AudioManager;
|
||||
import android.media.AudioRecord;
|
||||
import android.media.AudioTrack;
|
||||
import android.os.Build;
|
||||
import javax.annotation.Nullable;
|
||||
import org.webrtc.Logging;
|
||||
import org.webrtc.CalledByNative;
|
||||
|
||||
// WebRtcAudioManager handles tasks that uses android.media.AudioManager.
|
||||
// At construction, storeAudioParameters() is called and it retrieves
|
||||
// fundamental audio parameters like native sample rate and number of channels.
|
||||
// The result is then provided to the caller by nativeCacheAudioParameters().
|
||||
// It is also possible to call init() to set up the audio environment for best
|
||||
// possible "VoIP performance". All settings done in init() are reverted by
|
||||
// dispose(). This class can also be used without calling init() if the user
|
||||
// prefers to set up the audio environment separately. However, it is
|
||||
// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
|
||||
/**
|
||||
* This class contains static functions to query sample rate and input/output audio buffer sizes.
|
||||
*/
|
||||
class WebRtcAudioManager {
|
||||
private static final boolean DEBUG = false;
|
||||
private static final String TAG = "WebRtcAudioManagerExternal";
|
||||
|
||||
private static final String TAG = "WebRtcAudioManager";
|
||||
|
||||
// Use mono as default for both audio directions.
|
||||
private static boolean useStereoOutput = false;
|
||||
private static boolean useStereoInput = false;
|
||||
|
||||
// Call these methods to override the default mono audio modes for the specified direction(s)
|
||||
// (input and/or output).
|
||||
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
|
||||
@SuppressWarnings("NoSynchronizedMethodCheck")
|
||||
public static synchronized void setStereoOutput(boolean enable) {
|
||||
Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
|
||||
useStereoOutput = enable;
|
||||
}
|
||||
|
||||
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
|
||||
@SuppressWarnings("NoSynchronizedMethodCheck")
|
||||
public static synchronized void setStereoInput(boolean enable) {
|
||||
Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
|
||||
useStereoInput = enable;
|
||||
}
|
||||
|
||||
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
|
||||
@SuppressWarnings("NoSynchronizedMethodCheck")
|
||||
@CalledByNative
|
||||
public synchronized boolean getStereoOutput() {
|
||||
return useStereoOutput;
|
||||
}
|
||||
|
||||
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
|
||||
@SuppressWarnings("NoSynchronizedMethodCheck")
|
||||
@CalledByNative
|
||||
public synchronized boolean getStereoInput() {
|
||||
return useStereoInput;
|
||||
}
private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;

// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
@ -76,67 +34,32 @@ class WebRtcAudioManager {

private static final int DEFAULT_FRAME_PER_BUFFER = 256;

private final AudioManager audioManager;
private final int sampleRate;
private final int outputBufferSize;
private final int inputBufferSize;
private final VolumeLogger volumeLogger;

private boolean initialized = false;

@CalledByNative
WebRtcAudioManager(Context context) {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
this.volumeLogger = new VolumeLogger(audioManager);

final int outputChannels = getStereoOutput() ? 2 : 1;
final int inputChannels = getStereoInput() ? 2 : 1;

this.sampleRate = getNativeOutputSampleRate();
this.outputBufferSize = isLowLatencyOutputSupported(context)
? getLowLatencyOutputFramesPerBuffer()
: getMinOutputFrameSize(sampleRate, outputChannels);
this.inputBufferSize = isLowLatencyInputSupported(context)
? getLowLatencyInputFramesPerBuffer()
: getMinInputFrameSize(sampleRate, inputChannels);

WebRtcAudioUtils.logAudioState(TAG);
static AudioManager getAudioManager(Context context) {
return (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
}

@CalledByNative
private boolean init() {
Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
if (initialized) {
return true;
}
Logging.d(TAG, "audio mode is: " + WebRtcAudioUtils.modeToString(audioManager.getMode()));
initialized = true;
volumeLogger.start();
return true;
static int getOutputBufferSize(
Context context, AudioManager audioManager, int sampleRate, int numberOfOutputChannels) {
return isLowLatencyOutputSupported(context)
? getLowLatencyFramesPerBuffer(audioManager)
: getMinOutputFrameSize(sampleRate, numberOfOutputChannels);
}

@CalledByNative
private void dispose() {
Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
if (!initialized) {
return;
}
volumeLogger.stop();
static int getInputBufferSize(
Context context, AudioManager audioManager, int sampleRate, int numberOfInputChannels) {
return isLowLatencyInputSupported(context)
? getLowLatencyFramesPerBuffer(audioManager)
: getMinInputFrameSize(sampleRate, numberOfInputChannels);
}
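Taken together, the statics above replace the old stateful constructor: the audio parameters can now be derived on demand instead of being latched at construction. A rough sketch of the call sequence (hypothetical caller in the same package; this mirrors what the JNI code does when assembling AudioParameters):

AudioManager audioManager = WebRtcAudioManager.getAudioManager(context);
int sampleRate = WebRtcAudioManager.getSampleRate(audioManager);
int outputBufferSize = WebRtcAudioManager.getOutputBufferSize(
    context, audioManager, sampleRate, /* numberOfOutputChannels= */ 1);
int inputBufferSize = WebRtcAudioManager.getInputBufferSize(
    context, audioManager, sampleRate, /* numberOfInputChannels= */ 1);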

// Returns true if low-latency audio output is supported.
public static boolean isLowLatencyOutputSupported(Context context) {
private static boolean isLowLatencyOutputSupported(Context context) {
return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY);
}

// Returns true if low-latency audio input is supported.
// TODO(henrika): remove the hardcoded false return value when OpenSL ES
// input performance has been evaluated and tested more.
public static boolean isLowLatencyInputSupported(Context context) {
private static boolean isLowLatencyInputSupported(Context context) {
// TODO(henrika): investigate if some sort of device list is needed here
// as well. The NDK doc states that: "As of API level 21, lower latency
// audio input is supported on select devices. To take advantage of this
@ -144,49 +67,34 @@ class WebRtcAudioManager {
return WebRtcAudioUtils.runningOnLollipopOrHigher() && isLowLatencyOutputSupported(context);
}

// Returns the native output sample rate for this device's output stream.
private int getNativeOutputSampleRate() {
/**
 * Returns the native input/output sample rate for this device's output stream.
 */
@CalledByNative
static int getSampleRate(AudioManager audioManager) {
// Override this if we're running on an old emulator image which only
// supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
if (WebRtcAudioUtils.runningOnEmulator()) {
Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
return 8000;
}
// Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
// If so, use that value and return here.
if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
Logging.d(TAG,
"Default sample rate is overridden to " + WebRtcAudioUtils.getDefaultSampleRateHz()
+ " Hz");
return WebRtcAudioUtils.getDefaultSampleRateHz();
}
// No overrides available. Deliver best possible estimate based on default
// Android AudioManager APIs.
final int sampleRateHz;
if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
} else {
sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
}
// Deliver best possible estimate based on default Android AudioManager APIs.
final int sampleRateHz = WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()
? getSampleRateOnJellyBeanMR10OrHigher(audioManager)
: DEFAULT_SAMPLE_RATE_HZ;
Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
return sampleRateHz;
}

@CalledByNative
int getSampleRate() {
return sampleRate;
}

@TargetApi(17)
private int getSampleRateOnJellyBeanMR10OrHigher() {
private static int getSampleRateOnJellyBeanMR10OrHigher(AudioManager audioManager) {
String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
return (sampleRateString == null) ? WebRtcAudioUtils.getDefaultSampleRateHz()
: Integer.parseInt(sampleRateString);
return (sampleRateString == null) ? DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
}

// Returns the native output buffer size for low-latency output streams.
@TargetApi(17)
private int getLowLatencyOutputFramesPerBuffer() {
private static int getLowLatencyFramesPerBuffer(AudioManager audioManager) {
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
return DEFAULT_FRAME_PER_BUFFER;
}
@ -195,32 +103,6 @@ class WebRtcAudioManager {
return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
}

// Returns true if the device supports an audio effect (AEC or NS).
// Four conditions must be fulfilled if functions are to return true:
// 1) the platform must support the built-in (HW) effect,
// 2) explicit use (override) of a WebRTC based version must not be set,
// 3) the device must not be blacklisted for use of the effect, and
// 4) the UUID of the effect must be approved (some UUIDs can be excluded).
@CalledByNative
boolean isAcousticEchoCancelerSupported() {
return WebRtcAudioEffects.canUseAcousticEchoCanceler();
}

@CalledByNative
boolean isNoiseSuppressorSupported() {
return WebRtcAudioEffects.canUseNoiseSuppressor();
}

@CalledByNative
int getOutputBufferSize() {
return outputBufferSize;
}

@CalledByNative
int getInputBufferSize() {
return inputBufferSize;
}

// Returns the minimum output buffer size for Java based audio (AudioTrack).
// This size can also be used for OpenSL ES implementations on devices that
// lack support of low-latency output.
@ -233,11 +115,6 @@ class WebRtcAudioManager {
/ bytesPerFrame;
}

// Returns the native input buffer size for input streams.
private int getLowLatencyInputFramesPerBuffer() {
return getLowLatencyOutputFramesPerBuffer();
}

// Returns the minimum input buffer size for Java based audio (AudioRecord).
// This size can also be used for OpenSL ES implementations on devices that
// lack support of low-latency input.

@ -13,6 +13,8 @@ package org.webrtc.audio;
import android.annotation.TargetApi;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.AudioManager;
import android.content.Context;
import android.media.MediaRecorder.AudioSource;
import android.os.Process;
import java.lang.System;
@ -29,9 +31,7 @@ import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStartErrorCode;
import org.webrtc.audio.JavaAudioDeviceModule.SamplesReadyCallback;

class WebRtcAudioRecord {
private static final boolean DEBUG = false;

private static final String TAG = "WebRtcAudioRecord";
private static final String TAG = "WebRtcAudioRecordExternal";

// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
@ -52,33 +52,28 @@ class WebRtcAudioRecord {
// but the wait times out after this amount of time.
private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;

private static final int DEFAULT_AUDIO_SOURCE = getDefaultAudioSource();
private static int audioSource = DEFAULT_AUDIO_SOURCE;
public static final int DEFAULT_AUDIO_SOURCE = AudioSource.VOICE_COMMUNICATION;

private final long nativeAudioRecord;
private final Context context;
private final AudioManager audioManager;
private final int audioSource;

private @Nullable WebRtcAudioEffects effects = null;
private long nativeAudioRecord;

private final WebRtcAudioEffects effects = new WebRtcAudioEffects();

private @Nullable ByteBuffer byteBuffer;

private @Nullable AudioRecord audioRecord = null;
private @Nullable AudioRecordThread audioThread = null;

private static volatile boolean microphoneMute = false;
private volatile boolean microphoneMute = false;
private byte[] emptyBytes;

private static @Nullable AudioRecordErrorCallback errorCallback = null;

public static void setErrorCallback(AudioRecordErrorCallback errorCallback) {
Logging.d(TAG, "Set error callback");
WebRtcAudioRecord.errorCallback = errorCallback;
}

private static @Nullable SamplesReadyCallback audioSamplesReadyCallback = null;

public static void setOnAudioSamplesReady(SamplesReadyCallback callback) {
audioSamplesReadyCallback = callback;
}
private final @Nullable AudioRecordErrorCallback errorCallback;
private final @Nullable SamplesReadyCallback audioSamplesReadyCallback;
private final boolean isAcousticEchoCancelerSupported;
private final boolean isNoiseSuppressorSupported;

/**
 * Audio thread which keeps calling ByteBuffer.read() waiting for audio
@ -129,12 +124,6 @@ class WebRtcAudioRecord {
reportWebRtcAudioRecordError(errorMessage);
}
}
if (DEBUG) {
long nowTime = System.nanoTime();
long durationInMs = TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
lastTime = nowTime;
Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
}
}

try {
@ -155,32 +144,55 @@ class WebRtcAudioRecord {
}

@CalledByNative
WebRtcAudioRecord(long nativeAudioRecord) {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.nativeAudioRecord = nativeAudioRecord;
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
WebRtcAudioRecord(Context context, AudioManager audioManager) {
this(context, audioManager, DEFAULT_AUDIO_SOURCE, null /* errorCallback */,
null /* audioSamplesReadyCallback */, WebRtcAudioEffects.isAcousticEchoCancelerSupported(),
WebRtcAudioEffects.isNoiseSuppressorSupported());
}

public WebRtcAudioRecord(Context context, AudioManager audioManager, int audioSource,
@Nullable AudioRecordErrorCallback errorCallback,
@Nullable SamplesReadyCallback audioSamplesReadyCallback,
boolean isAcousticEchoCancelerSupported, boolean isNoiseSuppressorSupported) {
if (isAcousticEchoCancelerSupported && !WebRtcAudioEffects.isAcousticEchoCancelerSupported()) {
throw new IllegalArgumentException("HW AEC not supported");
}
effects = WebRtcAudioEffects.create();
if (isNoiseSuppressorSupported && !WebRtcAudioEffects.isNoiseSuppressorSupported()) {
throw new IllegalArgumentException("HW NS not supported");
}
this.context = context;
this.audioManager = audioManager;
this.audioSource = audioSource;
this.errorCallback = errorCallback;
this.audioSamplesReadyCallback = audioSamplesReadyCallback;
this.isAcousticEchoCancelerSupported = isAcousticEchoCancelerSupported;
this.isNoiseSuppressorSupported = isNoiseSuppressorSupported;
}
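The new constructor makes the configuration explicit and validated up front. A hypothetical direct construction (clients would normally go through JavaAudioDeviceModule's builder instead):

// Request the hardware effects only when WebRtcAudioEffects reports support;
// passing true on an unsupported device throws IllegalArgumentException above.
boolean useHwAec = WebRtcAudioEffects.isAcousticEchoCancelerSupported();
boolean useHwNs = WebRtcAudioEffects.isNoiseSuppressorSupported();
WebRtcAudioRecord audioRecord = new WebRtcAudioRecord(context, audioManager,
    WebRtcAudioRecord.DEFAULT_AUDIO_SOURCE, null /* errorCallback */,
    null /* audioSamplesReadyCallback */, useHwAec, useHwNs);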

@CalledByNative
public void setNativeAudioRecord(long nativeAudioRecord) {
this.nativeAudioRecord = nativeAudioRecord;
}

@CalledByNative
boolean isAcousticEchoCancelerSupported() {
return isAcousticEchoCancelerSupported;
}

@CalledByNative
boolean isNoiseSuppressorSupported() {
return isNoiseSuppressorSupported;
}

@CalledByNative
private boolean enableBuiltInAEC(boolean enable) {
Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
if (effects == null) {
Logging.e(TAG, "Built-in AEC is not supported on this platform");
return false;
}
return effects.setAEC(enable);
}

@CalledByNative
private boolean enableBuiltInNS(boolean enable) {
Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
if (effects == null) {
Logging.e(TAG, "Built-in NS is not supported on this platform");
return false;
}
return effects.setNS(enable);
}

@ -231,9 +243,7 @@ class WebRtcAudioRecord {
releaseAudioResources();
return -1;
}
if (effects != null) {
effects.enable(audioRecord.getAudioSessionId());
}
effects.enable(audioRecord.getAudioSessionId());
logMainParameters();
logMainParametersExtended();
return framesPerBuffer;
@ -269,12 +279,10 @@ class WebRtcAudioRecord {
audioThread.stopThread();
if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
}
audioThread = null;
if (effects != null) {
effects.release();
}
effects.release();
releaseAudioResources();
return true;
}
@ -314,19 +322,9 @@ class WebRtcAudioRecord {
@NativeClassQualifiedName("webrtc::android_adm::AudioRecordJni")
private native void nativeDataIsRecorded(long nativeAudioRecord, int bytes);

@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setAudioSource(int source) {
Logging.w(TAG, "Audio source is changed from: " + audioSource + " to " + source);
audioSource = source;
}

private static int getDefaultAudioSource() {
return AudioSource.VOICE_COMMUNICATION;
}

// Sets all recorded samples to zero if |mute| is true, i.e., ensures that
// the microphone is muted.
public static void setMicrophoneMute(boolean mute) {
public void setMicrophoneMute(boolean mute) {
Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
microphoneMute = mute;
}
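Muting is now per instance instead of a process-wide static. A short sketch (hypothetical; audioRecord is the instance owned by the audio device module):

audioRecord.setMicrophoneMute(true);  // delivered buffers are zero-filled
audioRecord.setMicrophoneMute(false); // resume normal capture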
@ -342,7 +340,7 @@ class WebRtcAudioRecord {

private void reportWebRtcAudioRecordInitError(String errorMessage) {
Logging.e(TAG, "Init recording error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
if (errorCallback != null) {
errorCallback.onWebRtcAudioRecordInitError(errorMessage);
}
@ -351,7 +349,7 @@ class WebRtcAudioRecord {
private void reportWebRtcAudioRecordStartError(
AudioRecordStartErrorCode errorCode, String errorMessage) {
Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
if (errorCallback != null) {
errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
}
@ -359,7 +357,7 @@ class WebRtcAudioRecord {

private void reportWebRtcAudioRecordError(String errorMessage) {
Logging.e(TAG, "Run-time recording error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
if (errorCallback != null) {
errorCallback.onWebRtcAudioRecordError(errorMessage);
}
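All three reporters now log the full audio state with the injected context/audioManager and then notify the per-instance callback. A minimal sketch of a client-side implementation, using the JavaAudioDeviceModule.AudioRecordErrorCallback interface whose methods are invoked above:

AudioRecordErrorCallback recordErrorCallback = new AudioRecordErrorCallback() {
  @Override
  public void onWebRtcAudioRecordInitError(String errorMessage) {
    Log.e(TAG, "onWebRtcAudioRecordInitError: " + errorMessage);
  }

  @Override
  public void onWebRtcAudioRecordStartError(
      AudioRecordStartErrorCode errorCode, String errorMessage) {
    Log.e(TAG, "onWebRtcAudioRecordStartError: " + errorCode + ". " + errorMessage);
  }

  @Override
  public void onWebRtcAudioRecordError(String errorMessage) {
    Log.e(TAG, "onWebRtcAudioRecordError: " + errorMessage);
  }
};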

@ -21,7 +21,6 @@ import android.os.Process;
import java.lang.Thread;
import java.nio.ByteBuffer;
import javax.annotation.Nullable;
import org.webrtc.ContextUtils;
import org.webrtc.Logging;
import org.webrtc.ThreadUtils;
import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackErrorCallback;
@ -30,9 +29,7 @@ import org.webrtc.CalledByNative;
import org.webrtc.NativeClassQualifiedName;

class WebRtcAudioTrack {
private static final boolean DEBUG = false;

private static final String TAG = "WebRtcAudioTrack";
private static final String TAG = "WebRtcAudioTrackExternal";

// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
@ -51,17 +48,6 @@ class WebRtcAudioTrack {
// By default, WebRTC creates audio tracks with a usage attribute
// corresponding to voice communications, such as telephony or VoIP.
private static final int DEFAULT_USAGE = getDefaultUsageAttribute();
private static int usageAttribute = DEFAULT_USAGE;

// This method overrides the default usage attribute and allows the user
// to set it to something other than AudioAttributes.USAGE_VOICE_COMMUNICATION.
// NOTE: calling this method will most likely break existing VoIP tuning.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setAudioTrackUsageAttribute(int usage) {
Logging.w(TAG, "Default usage attribute is changed from: " + DEFAULT_USAGE + " to " + usage);
usageAttribute = usage;
}

private static int getDefaultUsageAttribute() {
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
@ -77,7 +63,8 @@ class WebRtcAudioTrack {
return AudioAttributes.USAGE_VOICE_COMMUNICATION;
}

private final long nativeAudioTrack;
private long nativeAudioTrack;
private final Context context;
private final AudioManager audioManager;
private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();

@ -85,18 +72,14 @@ class WebRtcAudioTrack {

private @Nullable AudioTrack audioTrack = null;
private @Nullable AudioTrackThread audioThread = null;
private final VolumeLogger volumeLogger;

// Samples to be played are replaced by zeros if |speakerMute| is set to true.
// Can be used to ensure that the speaker is fully muted.
private static volatile boolean speakerMute = false;
private volatile boolean speakerMute = false;
private byte[] emptyBytes;

private static @Nullable AudioTrackErrorCallback errorCallback = null;

public static void setErrorCallback(AudioTrackErrorCallback errorCallback) {
Logging.d(TAG, "Set extended error callback");
WebRtcAudioTrack.errorCallback = errorCallback;
}
private final @Nullable AudioTrackErrorCallback errorCallback;

/**
 * Audio thread which keeps calling AudioTrack.write() to stream audio.
@ -192,15 +175,22 @@ class WebRtcAudioTrack {
}

@CalledByNative
WebRtcAudioTrack(long nativeAudioTrack) {
WebRtcAudioTrack(Context context, AudioManager audioManager) {
this(context, audioManager, null /* errorCallback */);
}

WebRtcAudioTrack(
Context context, AudioManager audioManager, @Nullable AudioTrackErrorCallback errorCallback) {
threadChecker.detachThread();
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.audioManager = audioManager;
this.errorCallback = errorCallback;
this.volumeLogger = new VolumeLogger(audioManager);
}
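Same injection pattern as the recorder: a hypothetical direct construction with an error callback (JavaAudioDeviceModule normally does this for you):

// 'trackErrorCallback' implements JavaAudioDeviceModule.AudioTrackErrorCallback.
WebRtcAudioTrack audioTrack =
    new WebRtcAudioTrack(context, audioManager, trackErrorCallback);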

@CalledByNative
public void setNativeAudioTrack(long nativeAudioTrack) {
this.nativeAudioTrack = nativeAudioTrack;
audioManager =
(AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
}

@CalledByNative
@ -279,6 +269,7 @@ class WebRtcAudioTrack {
@CalledByNative
private boolean startPlayout() {
threadChecker.checkIsOnValidThread();
volumeLogger.start();
Logging.d(TAG, "startPlayout");
assertTrue(audioTrack != null);
assertTrue(audioThread == null);
@ -310,6 +301,7 @@ class WebRtcAudioTrack {
@CalledByNative
private boolean stopPlayout() {
threadChecker.checkIsOnValidThread();
volumeLogger.stop();
Logging.d(TAG, "stopPlayout");
assertTrue(audioThread != null);
logUnderrunCount();
@ -319,7 +311,7 @@ class WebRtcAudioTrack {
audioThread.interrupt();
if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
Logging.e(TAG, "Join of AudioTrackThread timed out.");
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
}
Logging.d(TAG, "AudioTrackThread has now been stopped.");
audioThread = null;
@ -332,7 +324,6 @@ class WebRtcAudioTrack {
private int getStreamMaxVolume() {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "getStreamMaxVolume");
assertTrue(audioManager != null);
return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}

@ -341,7 +332,6 @@ class WebRtcAudioTrack {
private boolean setStreamVolume(int volume) {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "setStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
if (isVolumeFixed()) {
Logging.e(TAG, "The device implements a fixed volume policy.");
return false;
@ -364,7 +354,6 @@ class WebRtcAudioTrack {
private int getStreamVolume() {
threadChecker.checkIsOnValidThread();
Logging.d(TAG, "getStreamVolume");
assertTrue(audioManager != null);
return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
}

@ -394,12 +383,9 @@ class WebRtcAudioTrack {
if (sampleRateInHz != nativeOutputSampleRate) {
Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
}
if (usageAttribute != DEFAULT_USAGE) {
Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
}
// Create an audio track where the audio usage is for VoIP and the content type is speech.
return new AudioTrack(new AudioAttributes.Builder()
.setUsage(usageAttribute)
.setUsage(DEFAULT_USAGE)
.setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
.build(),
new AudioFormat.Builder()
@ -466,7 +452,7 @@ class WebRtcAudioTrack {

// Sets all samples to be played out to zero if |mute| is true, i.e.,
// ensures that the speaker is muted.
public static void setSpeakerMute(boolean mute) {
public void setSpeakerMute(boolean mute) {
Logging.w(TAG, "setSpeakerMute(" + mute + ")");
speakerMute = mute;
}
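Speaker and microphone mute are the two controls that stay dynamic after this CL, so the client-facing path is the AudioDeviceModule rather than this class. A sketch, assuming adm is a JavaAudioDeviceModule built via its builder:

adm.setSpeakerMute(true);    // zero-fill samples written to the AudioTrack
adm.setMicrophoneMute(true); // zero-fill samples read from the AudioRecord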
@ -482,7 +468,7 @@ class WebRtcAudioTrack {

private void reportWebRtcAudioTrackInitError(String errorMessage) {
Logging.e(TAG, "Init playout error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
if (errorCallback != null) {
errorCallback.onWebRtcAudioTrackInitError(errorMessage);
}
@ -491,7 +477,7 @@ class WebRtcAudioTrack {
private void reportWebRtcAudioTrackStartError(
AudioTrackStartErrorCode errorCode, String errorMessage) {
Logging.e(TAG, "Start playout error: " + errorCode + ". " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
if (errorCallback != null) {
errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
}
@ -499,7 +485,7 @@ class WebRtcAudioTrack {

private void reportWebRtcAudioTrackError(String errorMessage) {
Logging.e(TAG, "Run-time playback error: " + errorMessage);
WebRtcAudioUtils.logAudioState(TAG);
WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
if (errorCallback != null) {
errorCallback.onWebRtcAudioTrackError(errorMessage);
}

@ -29,127 +29,10 @@ import java.lang.Thread;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.webrtc.ContextUtils;
import org.webrtc.Logging;

final class WebRtcAudioUtils {
private static final String TAG = "WebRtcAudioUtils";

// List of devices where it has been verified that the built-in effect
// is bad and where it makes sense to avoid it and rely on the
// native WebRTC version instead. The device name is given by Build.MODEL.
private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
// It is recommended to maintain a list of blacklisted models outside
// this package and instead call setWebRtcBasedAcousticEchoCanceler(true)
// from the client for devices where the built-in AEC shall be disabled.
};
private static final String[] BLACKLISTED_NS_MODELS = new String[] {
// It is recommended to maintain a list of blacklisted models outside
// this package and instead call setWebRtcBasedNoiseSuppressor(true)
// from the client for devices where the built-in NS shall be disabled.
};
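A sketch of the pattern the comments recommend: keep the model list in application code and force the WebRTC software effects for affected devices (the setters are defined below; the model name here is a placeholder, not a known-bad device):

// Hypothetical client-side blacklist.
private static final List<String> BAD_EFFECT_MODELS = Arrays.asList("SomeModel");

static void configureAudioEffects() {
  if (BAD_EFFECT_MODELS.contains(android.os.Build.MODEL)) {
    WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
    WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);
  }
}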

// Use 16kHz as the default sample rate. A higher sample rate might prevent
// us from supporting communication mode on some older (e.g. ICS) devices.
private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
private static int defaultSampleRateHz = DEFAULT_SAMPLE_RATE_HZ;
// Set to true if setDefaultSampleRateHz() has been called.
private static boolean isDefaultSampleRateOverridden = false;

// By default, utilize hardware based audio effects for AEC and NS when
// available.
private static boolean useWebRtcBasedAcousticEchoCanceler = false;
private static boolean useWebRtcBasedNoiseSuppressor = false;

// Call these methods if any hardware based effect shall be replaced by a
// software based version provided by the WebRTC stack instead.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
useWebRtcBasedAcousticEchoCanceler = enable;
}

// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setWebRtcBasedNoiseSuppressor(boolean enable) {
useWebRtcBasedNoiseSuppressor = enable;
}

// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setWebRtcBasedAutomaticGainControl(boolean enable) {
// TODO(henrika): deprecated; remove when no longer used by any client.
Logging.w(TAG, "setWebRtcBasedAutomaticGainControl() is deprecated");
}

// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
if (useWebRtcBasedAcousticEchoCanceler) {
Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
}
return useWebRtcBasedAcousticEchoCanceler;
}

// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
if (useWebRtcBasedNoiseSuppressor) {
Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
}
return useWebRtcBasedNoiseSuppressor;
}

// TODO(henrika): deprecated; remove when no longer used by any client.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized boolean useWebRtcBasedAutomaticGainControl() {
// Always return true here to avoid trying to use any built-in AGC.
return true;
}

// Returns true if the device supports an audio effect (AEC or NS).
// Four conditions must be fulfilled if functions are to return true:
// 1) the platform must support the built-in (HW) effect,
// 2) explicit use (override) of a WebRTC based version must not be set,
// 3) the device must not be blacklisted for use of the effect, and
// 4) the UUID of the effect must be approved (some UUIDs can be excluded).
public static boolean isAcousticEchoCancelerSupported() {
return WebRtcAudioEffects.canUseAcousticEchoCanceler();
}
public static boolean isNoiseSuppressorSupported() {
return WebRtcAudioEffects.canUseNoiseSuppressor();
}

// Call this method if the default handling of querying the native sample
// rate shall be overridden. Can be useful on some devices where the
// available Android APIs are known to return invalid results.
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
@SuppressWarnings("NoSynchronizedMethodCheck")
public static synchronized void setDefaultSampleRateHz(int sampleRateHz) {
isDefaultSampleRateOverridden = true;
defaultSampleRateHz = sampleRateHz;
}
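Usage is a single call made before the audio stack starts; e.g., hypothetically, for a device whose AudioManager is known to misreport its native rate:

WebRtcAudioUtils.setDefaultSampleRateHz(48000);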
|
||||
|
||||
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
|
||||
@SuppressWarnings("NoSynchronizedMethodCheck")
|
||||
public static synchronized boolean isDefaultSampleRateOverridden() {
|
||||
return isDefaultSampleRateOverridden;
|
||||
}
|
||||
|
||||
// TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
|
||||
@SuppressWarnings("NoSynchronizedMethodCheck")
|
||||
public static synchronized int getDefaultSampleRateHz() {
|
||||
return defaultSampleRateHz;
|
||||
}
|
||||
|
||||
public static List<String> getBlackListedModelsForAecUsage() {
|
||||
return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AEC_MODELS);
|
||||
}
|
||||
|
||||
public static List<String> getBlackListedModelsForNsUsage() {
|
||||
return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_NS_MODELS);
|
||||
}
|
||||
private static final String TAG = "WebRtcAudioUtilsExternal";
|
||||
|
||||
public static boolean runningOnJellyBeanMR1OrHigher() {
|
||||
// November 2012: Android 4.2. API Level 17.
|
||||
@ -214,22 +97,19 @@ final class WebRtcAudioUtils {
|
||||
// Logs information about the current audio state. The idea is to call this
|
||||
// method when errors are detected to log under what conditions the error
|
||||
// occurred. Hopefully it will provide clues to what might be the root cause.
|
||||
static void logAudioState(String tag) {
|
||||
static void logAudioState(String tag, Context context, AudioManager audioManager) {
|
||||
logDeviceInfo(tag);
|
||||
final Context context = ContextUtils.getApplicationContext();
|
||||
final AudioManager audioManager =
|
||||
(AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
|
||||
logAudioStateBasic(tag, audioManager);
|
||||
logAudioStateBasic(tag, context, audioManager);
|
||||
logAudioStateVolume(tag, audioManager);
|
||||
logAudioDeviceInfo(tag, audioManager);
|
||||
}
|
||||
|
||||
// Reports basic audio statistics.
|
||||
private static void logAudioStateBasic(String tag, AudioManager audioManager) {
|
||||
private static void logAudioStateBasic(String tag, Context context, AudioManager audioManager) {
|
||||
Logging.d(tag,
|
||||
"Audio State: "
|
||||
+ "audio mode: " + modeToString(audioManager.getMode()) + ", "
|
||||
+ "has mic: " + hasMicrophone() + ", "
|
||||
+ "has mic: " + hasMicrophone(context) + ", "
|
||||
+ "mic muted: " + audioManager.isMicrophoneMute() + ", "
|
||||
+ "music active: " + audioManager.isMusicActive() + ", "
|
||||
+ "speakerphone: " + audioManager.isSpeakerphoneOn() + ", "
|
||||
@ -394,8 +274,7 @@ final class WebRtcAudioUtils {
|
||||
}
|
||||
|
||||
// Returns true if the device can record audio via a microphone.
|
||||
private static boolean hasMicrophone() {
|
||||
return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
|
||||
PackageManager.FEATURE_MICROPHONE);
|
||||
private static boolean hasMicrophone(Context context) {
|
||||
return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_MICROPHONE);
|
||||
}
|
||||
}
|
||||
|
||||
@ -14,7 +14,6 @@
|
||||
#include "modules/audio_device/fine_audio_buffer.h"
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/logging.h"
|
||||
#include "sdk/android/src/jni/audio_device/audio_manager.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
@ -24,9 +23,9 @@ enum AudioDeviceMessageType : uint32_t {
|
||||
kMessageOutputStreamDisconnected,
|
||||
};
|
||||
|
||||
AAudioPlayer::AAudioPlayer(AudioManager* audio_manager)
|
||||
AAudioPlayer::AAudioPlayer(const AudioParameters& audio_parameters)
|
||||
: main_thread_(rtc::Thread::Current()),
|
||||
aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) {
|
||||
aaudio_(audio_parameters, AAUDIO_DIRECTION_OUTPUT, this) {
|
||||
RTC_LOG(INFO) << "ctor";
|
||||
thread_checker_aaudio_.DetachFromThread();
|
||||
}
|
||||
|
||||
@ -55,7 +55,7 @@ class AAudioPlayer final : public AudioOutput,
|
||||
public AAudioObserverInterface,
|
||||
public rtc::MessageHandler {
|
||||
public:
|
||||
explicit AAudioPlayer(AudioManager* audio_manager);
|
||||
explicit AAudioPlayer(const AudioParameters& audio_parameters);
|
||||
~AAudioPlayer() override;
|
||||
|
||||
int Init() override;
|
||||
|
||||
@ -15,7 +15,7 @@
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/timeutils.h"
|
||||
#include "sdk/android/src/jni/audio_device/audio_manager.h"
|
||||
|
||||
#include "system_wrappers/include/sleep.h"
|
||||
|
||||
namespace webrtc {
|
||||
@ -26,9 +26,9 @@ enum AudioDeviceMessageType : uint32_t {
|
||||
kMessageInputStreamDisconnected,
|
||||
};
|
||||
|
||||
AAudioRecorder::AAudioRecorder(AudioManager* audio_manager)
|
||||
AAudioRecorder::AAudioRecorder(const AudioParameters& audio_parameters)
|
||||
: main_thread_(rtc::Thread::Current()),
|
||||
aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) {
|
||||
aaudio_(audio_parameters, AAUDIO_DIRECTION_INPUT, this) {
|
||||
RTC_LOG(INFO) << "ctor";
|
||||
thread_checker_aaudio_.DetachFromThread();
|
||||
}
|
||||
@ -122,6 +122,14 @@ void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
|
||||
audio_device_buffer_, audio_parameters.sample_rate(), capacity));
|
||||
}
|
||||
|
||||
bool AAudioRecorder::IsAcousticEchoCancelerSupported() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool AAudioRecorder::IsNoiseSuppressorSupported() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
int AAudioRecorder::EnableBuiltInAEC(bool enable) {
|
||||
RTC_LOG(INFO) << "EnableBuiltInAEC: " << enable;
|
||||
RTC_LOG(LS_ERROR) << "Not implemented";
|
||||
|
||||
@ -47,7 +47,7 @@ class AAudioRecorder : public AudioInput,
|
||||
public AAudioObserverInterface,
|
||||
public rtc::MessageHandler {
|
||||
public:
|
||||
explicit AAudioRecorder(AudioManager* audio_manager);
|
||||
explicit AAudioRecorder(const AudioParameters& audio_parameters);
|
||||
~AAudioRecorder() override;
|
||||
|
||||
int Init() override;
|
||||
@ -63,6 +63,8 @@ class AAudioRecorder : public AudioInput,
|
||||
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
|
||||
|
||||
// TODO(henrika): add support using AAudio APIs when available.
|
||||
bool IsAcousticEchoCancelerSupported() const override;
|
||||
bool IsNoiseSuppressorSupported() const override;
|
||||
int EnableBuiltInAEC(bool enable) override;
|
||||
int EnableBuiltInAGC(bool enable) override;
|
||||
int EnableBuiltInNS(bool enable) override;
|
||||
|
||||
@ -13,7 +13,6 @@
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/strings/string_builder.h"
|
||||
#include "rtc_base/timeutils.h"
|
||||
#include "sdk/android/src/jni/audio_device/audio_manager.h"
|
||||
|
||||
#define LOG_ON_ERROR(op) \
|
||||
do { \
|
||||
@ -132,15 +131,14 @@ class ScopedStreamBuilder {
|
||||
|
||||
} // namespace
|
||||
|
||||
AAudioWrapper::AAudioWrapper(AudioManager* audio_manager,
|
||||
AAudioWrapper::AAudioWrapper(const AudioParameters& audio_parameters,
|
||||
aaudio_direction_t direction,
|
||||
AAudioObserverInterface* observer)
|
||||
: direction_(direction), observer_(observer) {
|
||||
: audio_parameters_(audio_parameters),
|
||||
direction_(direction),
|
||||
observer_(observer) {
|
||||
RTC_LOG(INFO) << "ctor";
|
||||
RTC_DCHECK(observer_);
|
||||
direction_ == AAUDIO_DIRECTION_OUTPUT
|
||||
? audio_parameters_ = audio_manager->GetPlayoutAudioParameters()
|
||||
: audio_parameters_ = audio_manager->GetRecordAudioParameters();
|
||||
aaudio_thread_checker_.DetachFromThread();
|
||||
RTC_LOG(INFO) << audio_parameters_.ToString();
|
||||
}
|
||||
|
||||
@ -20,8 +20,6 @@ namespace webrtc {
|
||||
|
||||
namespace android_adm {
|
||||
|
||||
class AudioManager;
|
||||
|
||||
// AAudio callback interface for audio transport to/from the AAudio stream.
|
||||
// The interface also contains an error callback method for notifications of
|
||||
// e.g. device changes.
|
||||
@ -60,7 +58,7 @@ class AAudioObserverInterface {
|
||||
// ensure that the audio device and stream direction agree.
|
||||
class AAudioWrapper {
|
||||
public:
|
||||
AAudioWrapper(AudioManager* audio_manager,
|
||||
AAudioWrapper(const AudioParameters& audio_parameters,
|
||||
aaudio_direction_t direction,
|
||||
AAudioObserverInterface* observer);
|
||||
~AAudioWrapper();
|
||||
@ -117,7 +115,7 @@ class AAudioWrapper {
|
||||
|
||||
rtc::ThreadChecker thread_checker_;
|
||||
rtc::ThreadChecker aaudio_thread_checker_;
|
||||
AudioParameters audio_parameters_;
|
||||
const AudioParameters audio_parameters_;
|
||||
const aaudio_direction_t direction_;
|
||||
AAudioObserverInterface* observer_ = nullptr;
|
||||
AAudioStream* stream_ = nullptr;
|
||||
|
||||
@ -16,6 +16,7 @@
|
||||
#include "rtc_base/logging.h"
|
||||
#include "rtc_base/refcountedobject.h"
|
||||
#include "rtc_base/thread_checker.h"
|
||||
#include "sdk/android/generated_audio_device_base_jni/jni/WebRtcAudioManager_jni.h"
|
||||
#include "system_wrappers/include/metrics.h"
|
||||
|
||||
#define CHECKinitialized_() \
|
||||
@ -38,15 +39,15 @@ namespace android_adm {
|
||||
|
||||
namespace {
|
||||
|
||||
// InputType/OutputType can be any class that implements the capturing/rendering
|
||||
// part of the AudioDeviceGeneric API.
|
||||
// Construction and destruction must be done on one and the same thread. Each
|
||||
// internal implementation of InputType and OutputType will RTC_DCHECK if that
|
||||
// is not the case. All implemented methods must also be called on the same
|
||||
// thread. See comments in each InputType/OutputType class for more info.
|
||||
// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
|
||||
// and ClearAndroidAudioDeviceObjects) from a different thread but both will
|
||||
// RTC_CHECK that the calling thread is attached to a Java VM.
|
||||
// This class combines a generic instance of an AudioInput and a generic
|
||||
// instance of an AudioOutput to create an AudioDeviceModule. This is mostly
|
||||
// done by delegating to the audio input/output with some glue code. This class
|
||||
// also directly implements some of the AudioDeviceModule methods with dummy
|
||||
// implementations.
|
||||
//
|
||||
// An instance can be created on any thread, but must then be used on one and
|
||||
// the same thread. All public methods must also be called on the same thread. A
|
||||
// thread checker will RTC_DCHECK if any method is called on an invalid thread.
|
||||
class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
public:
|
||||
// For use with UMA logging. Must be kept in sync with histograms.xml in
|
||||
@ -61,17 +62,20 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
};
|
||||
|
||||
AndroidAudioDeviceModule(AudioDeviceModule::AudioLayer audio_layer,
|
||||
std::unique_ptr<AudioManager> audio_manager,
|
||||
bool is_stereo_playout_supported,
|
||||
bool is_stereo_record_supported,
|
||||
uint16_t playout_delay_ms,
|
||||
std::unique_ptr<AudioInput> audio_input,
|
||||
std::unique_ptr<AudioOutput> audio_output)
|
||||
: audio_layer_(audio_layer),
|
||||
audio_manager_(std::move(audio_manager)),
|
||||
is_stereo_playout_supported_(is_stereo_playout_supported),
|
||||
is_stereo_record_supported_(is_stereo_record_supported),
|
||||
playout_delay_ms_(playout_delay_ms),
|
||||
input_(std::move(audio_input)),
|
||||
output_(std::move(audio_output)),
|
||||
initialized_(false) {
|
||||
RTC_CHECK(input_);
|
||||
RTC_CHECK(output_);
|
||||
RTC_CHECK(audio_manager_);
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
thread_checker_.DetachFromThread();
|
||||
}
|
||||
@ -99,14 +103,10 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
return 0;
|
||||
}
|
||||
InitStatus status;
|
||||
if (!audio_manager_->Init()) {
|
||||
status = InitStatus::OTHER_ERROR;
|
||||
} else if (output_->Init() != 0) {
|
||||
audio_manager_->Close();
|
||||
if (output_->Init() != 0) {
|
||||
status = InitStatus::PLAYOUT_ERROR;
|
||||
} else if (input_->Init() != 0) {
|
||||
output_->Terminate();
|
||||
audio_manager_->Close();
|
||||
status = InitStatus::RECORDING_ERROR;
|
||||
} else {
|
||||
initialized_ = true;
|
||||
@ -129,7 +129,6 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
RTC_DCHECK(thread_checker_.CalledOnValidThread());
|
||||
int32_t err = input_->Terminate();
|
||||
err |= output_->Terminate();
|
||||
err |= !audio_manager_->Close();
|
||||
initialized_ = false;
|
||||
RTC_DCHECK_EQ(err, 0);
|
||||
return err;
|
||||
@ -262,7 +261,6 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
int32_t StopPlayout() override {
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
CHECKinitialized_();
|
||||
// Avoid using audio manger (JNI/Java cost) if playout was inactive.
|
||||
if (!Playing())
|
||||
return 0;
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
@ -463,12 +461,10 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Returns true if the audio manager has been configured to support stereo
|
||||
// and false otherwised. Default is mono.
|
||||
int32_t StereoPlayoutIsAvailable(bool* available) const override {
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
CHECKinitialized_();
|
||||
*available = audio_manager_->IsStereoPlayoutSupported();
|
||||
*available = is_stereo_playout_supported_;
|
||||
RTC_LOG(INFO) << "output: " << *available;
|
||||
return 0;
|
||||
}
|
||||
@ -480,11 +476,9 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
RTC_LOG(WARNING) << "recording in stereo is not supported";
|
||||
return -1;
|
||||
}
|
||||
bool available = audio_manager_->IsStereoPlayoutSupported();
|
||||
// Android does not support changes between mono and stero on the fly.
|
||||
// Instead, the native audio layer is configured via the audio manager
|
||||
// to either support mono or stereo. It is allowed to call this method
|
||||
// if that same state is not modified.
|
||||
bool available = is_stereo_playout_supported_;
|
||||
// Android does not support changes between mono and stero on the fly. It is
|
||||
// allowed to call this method if that same state is not modified.
|
||||
if (enable != available) {
|
||||
RTC_LOG(WARNING) << "failed to change stereo recording";
|
||||
return -1;
|
||||
@ -500,7 +494,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
int32_t StereoPlayout(bool* enabled) const override {
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
CHECKinitialized_();
|
||||
*enabled = audio_manager_->IsStereoPlayoutSupported();
|
||||
*enabled = is_stereo_playout_supported_;
|
||||
RTC_LOG(INFO) << "output: " << *enabled;
|
||||
return 0;
|
||||
}
|
||||
@ -508,7 +502,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
int32_t StereoRecordingIsAvailable(bool* available) const override {
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
CHECKinitialized_();
|
||||
*available = audio_manager_->IsStereoRecordSupported();
|
||||
*available = is_stereo_record_supported_;
|
||||
RTC_LOG(INFO) << "output: " << *available;
|
||||
return 0;
|
||||
}
|
||||
@ -520,11 +514,9 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
RTC_LOG(WARNING) << "recording in stereo is not supported";
|
||||
return -1;
|
||||
}
|
||||
bool available = audio_manager_->IsStereoRecordSupported();
|
||||
// Android does not support changes between mono and stero on the fly.
|
||||
// Instead, the native audio layer is configured via the audio manager
|
||||
// to either support mono or stereo. It is allowed to call this method
|
||||
// if that same state is not modified.
|
||||
bool available = is_stereo_record_supported_;
|
||||
// Android does not support changes between mono and stero on the fly. It is
|
||||
// allowed to call this method if that same state is not modified.
|
||||
if (enable != available) {
|
||||
RTC_LOG(WARNING) << "failed to change stereo recording";
|
||||
return -1;
|
||||
@ -540,7 +532,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
int32_t StereoRecording(bool* enabled) const override {
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
CHECKinitialized_();
|
||||
*enabled = audio_manager_->IsStereoRecordSupported();
|
||||
*enabled = is_stereo_record_supported_;
|
||||
RTC_LOG(INFO) << "output: " << *enabled;
|
||||
return 0;
|
||||
}
|
||||
@ -548,7 +540,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
int32_t PlayoutDelay(uint16_t* delay_ms) const override {
|
||||
CHECKinitialized_();
|
||||
// Best guess we can do is to use half of the estimated total delay.
|
||||
*delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
|
||||
*delay_ms = playout_delay_ms_;
|
||||
RTC_DCHECK_GT(*delay_ms, 0);
|
||||
return 0;
|
||||
}
|
||||
@ -568,7 +560,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
bool BuiltInAECIsAvailable() const override {
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
CHECKinitialized__BOOL();
|
||||
bool isAvailable = audio_manager_->IsAcousticEchoCancelerSupported();
|
||||
bool isAvailable = input_->IsAcousticEchoCancelerSupported();
|
||||
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||
return isAvailable;
|
||||
}
|
||||
@ -592,7 +584,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
bool BuiltInNSIsAvailable() const override {
|
||||
RTC_LOG(INFO) << __FUNCTION__;
|
||||
CHECKinitialized__BOOL();
|
||||
bool isAvailable = audio_manager_->IsNoiseSuppressorSupported();
|
||||
bool isAvailable = input_->IsNoiseSuppressorSupported();
|
||||
RTC_LOG(INFO) << "output: " << isAvailable;
|
||||
return isAvailable;
|
||||
}
|
||||
@ -638,7 +630,9 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
rtc::ThreadChecker thread_checker_;
|
||||
|
||||
const AudioDeviceModule::AudioLayer audio_layer_;
|
||||
const std::unique_ptr<AudioManager> audio_manager_;
|
||||
const bool is_stereo_playout_supported_;
|
||||
const bool is_stereo_record_supported_;
|
||||
const uint16_t playout_delay_ms_;
|
||||
const std::unique_ptr<AudioInput> input_;
|
||||
const std::unique_ptr<AudioOutput> output_;
|
||||
std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
|
||||
@ -648,14 +642,47 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
|
||||
|
||||
} // namespace
|
||||
|
||||
ScopedJavaLocalRef<jobject> GetAudioManager(JNIEnv* env,
|
||||
const JavaRef<jobject>& j_context) {
|
||||
return Java_WebRtcAudioManager_getAudioManager(env, j_context);
|
||||
}
|
||||
|
||||
int GetDefaultSampleRate(JNIEnv* env, const JavaRef<jobject>& j_audio_manager) {
|
||||
return Java_WebRtcAudioManager_getSampleRate(env, j_audio_manager);
|
||||
}
|
||||
|
||||
void GetAudioParameters(JNIEnv* env,
|
||||
const JavaRef<jobject>& j_context,
|
||||
const JavaRef<jobject>& j_audio_manager,
|
||||
int sample_rate,
|
||||
bool use_stereo_input,
|
||||
bool use_stereo_output,
|
||||
AudioParameters* input_parameters,
|
||||
AudioParameters* output_parameters) {
|
||||
const size_t output_channels = use_stereo_output ? 2 : 1;
|
||||
const size_t input_channels = use_stereo_input ? 2 : 1;
|
||||
const size_t output_buffer_size = Java_WebRtcAudioManager_getOutputBufferSize(
|
||||
env, j_context, j_audio_manager, sample_rate, output_channels);
|
||||
const size_t input_buffer_size = Java_WebRtcAudioManager_getInputBufferSize(
|
||||
env, j_context, j_audio_manager, sample_rate, input_channels);
|
||||
output_parameters->reset(sample_rate, static_cast<size_t>(output_channels),
|
||||
static_cast<size_t>(output_buffer_size));
|
||||
input_parameters->reset(sample_rate, static_cast<size_t>(input_channels),
|
||||
static_cast<size_t>(input_buffer_size));
|
||||
RTC_CHECK(input_parameters->is_valid());
|
||||
RTC_CHECK(output_parameters->is_valid());
|
||||
}
|
||||
|
||||
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModuleFromInputAndOutput(
|
||||
AudioDeviceModule::AudioLayer audio_layer,
|
||||
std::unique_ptr<AudioManager> audio_manager,
|
||||
bool is_stereo_playout_supported,
|
||||
bool is_stereo_record_supported,
|
||||
uint16_t playout_delay_ms,
|
||||
std::unique_ptr<AudioInput> audio_input,
|
||||
std::unique_ptr<AudioOutput> audio_output) {
|
||||
return new rtc::RefCountedObject<AndroidAudioDeviceModule>(
|
||||
audio_layer, std::move(audio_manager), std::move(audio_input),
|
||||
std::move(audio_output));
|
||||
audio_layer, is_stereo_playout_supported, is_stereo_record_supported,
|
||||
playout_delay_ms, std::move(audio_input), std::move(audio_output));
|
||||
}
|
||||
|
||||
} // namespace android_adm
|
||||
|
||||
@ -16,14 +16,11 @@
|
||||
#include "api/optional.h"
|
||||
#include "modules/audio_device/audio_device_buffer.h"
|
||||
#include "sdk/android/native_api/jni/scoped_java_ref.h"
|
||||
#include "sdk/android/src/jni/audio_device/audio_manager.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace android_adm {
|
||||
|
||||
class AudioManager;
|
||||
|
||||
class AudioInput {
|
||||
public:
|
||||
virtual ~AudioInput() {}
|
||||
@ -40,6 +37,11 @@ class AudioInput {
|
||||
|
||||
virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
|
||||
|
||||
// Returns true if the audio input supports built-in audio effects for AEC and
|
||||
// NS.
|
||||
virtual bool IsAcousticEchoCancelerSupported() const = 0;
|
||||
virtual bool IsNoiseSuppressorSupported() const = 0;
|
||||
|
||||
virtual int32_t EnableBuiltInAEC(bool enable) = 0;
|
||||
virtual int32_t EnableBuiltInAGC(bool enable) = 0;
|
||||
virtual int32_t EnableBuiltInNS(bool enable) = 0;
|
||||
@ -64,9 +66,29 @@ class AudioOutput {
|
||||
virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
|
||||
};
|
||||
|
||||
// Extract an android.media.AudioManager from an android.content.Context.
|
||||
ScopedJavaLocalRef<jobject> GetAudioManager(JNIEnv* env,
|
||||
const JavaRef<jobject>& j_context);
|
||||
|
||||
// Get default audio sample rate by querying an android.media.AudioManager.
|
||||
int GetDefaultSampleRate(JNIEnv* env, const JavaRef<jobject>& j_audio_manager);
|
||||
|
||||
// Get audio input and output parameters based on a number of settings.
|
||||
void GetAudioParameters(JNIEnv* env,
|
||||
const JavaRef<jobject>& j_context,
|
||||
const JavaRef<jobject>& j_audio_manager,
|
||||
int sample_rate,
|
||||
bool use_stereo_input,
|
||||
bool use_stereo_output,
|
||||
AudioParameters* input_parameters,
|
||||
AudioParameters* output_parameters);
|
||||
|
||||
// Glue together an audio input and audio output to get an AudioDeviceModule.
|
||||
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModuleFromInputAndOutput(
|
||||
AudioDeviceModule::AudioLayer audio_layer,
|
||||
std::unique_ptr<AudioManager> audio_manager,
|
||||
bool is_stereo_playout_supported,
|
||||
bool is_stereo_record_supported,
|
||||
uint16_t playout_delay_ms,
|
||||
std::unique_ptr<AudioInput> audio_input,
|
||||
std::unique_ptr<AudioOutput> audio_output);
|
||||
|
||||
|
||||
sdk/android/src/jni/audio_device/audio_manager.cc
@ -1,132 +0,0 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "sdk/android/src/jni/audio_device/audio_manager.h"

#include <utility>

#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/refcount.h"
#include "rtc_base/refcountedobject.h"

#include "sdk/android/generated_audio_device_base_jni/jni/WebRtcAudioManager_jni.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
#include "sdk/android/src/jni/audio_device/audio_device_module.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"
#include "sdk/android/src/jni/jni_helpers.h"

namespace webrtc {

namespace android_adm {

// AudioManager implementation
AudioManager::AudioManager(JNIEnv* env,
                           AudioDeviceModule::AudioLayer audio_layer,
                           const JavaParamRef<jobject>& application_context)
    : j_audio_manager_(
          Java_WebRtcAudioManager_Constructor(env, application_context)),
      audio_layer_(audio_layer),
      initialized_(false) {
  RTC_LOG(INFO) << "ctor";
  const int sample_rate =
      Java_WebRtcAudioManager_getSampleRate(env, j_audio_manager_);
  const size_t output_channels =
      Java_WebRtcAudioManager_getStereoOutput(env, j_audio_manager_) ? 2 : 1;
  const size_t input_channels =
      Java_WebRtcAudioManager_getStereoInput(env, j_audio_manager_) ? 2 : 1;
  const size_t output_buffer_size =
      Java_WebRtcAudioManager_getOutputBufferSize(env, j_audio_manager_);
  const size_t input_buffer_size =
      Java_WebRtcAudioManager_getInputBufferSize(env, j_audio_manager_);
  playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
                            static_cast<size_t>(output_buffer_size));
  record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
                           static_cast<size_t>(input_buffer_size));
  RTC_CHECK(playout_parameters_.is_valid());
  RTC_CHECK(record_parameters_.is_valid());
  thread_checker_.DetachFromThread();
}

AudioManager::~AudioManager() {
  RTC_LOG(INFO) << "dtor";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  Close();
}

bool AudioManager::Init() {
  RTC_LOG(INFO) << "Init";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(!initialized_);
  RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
  JNIEnv* env = AttachCurrentThreadIfNeeded();
  if (!Java_WebRtcAudioManager_init(env, j_audio_manager_)) {
    RTC_LOG(LS_ERROR) << "Init() failed";
    return false;
  }
  initialized_ = true;
  return true;
}

bool AudioManager::Close() {
  RTC_LOG(INFO) << "Close";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (!initialized_)
    return true;
  JNIEnv* env = AttachCurrentThreadIfNeeded();
  Java_WebRtcAudioManager_dispose(env, j_audio_manager_);
  initialized_ = false;
  return true;
}

bool AudioManager::IsAcousticEchoCancelerSupported() const {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  JNIEnv* env = AttachCurrentThreadIfNeeded();
  return Java_WebRtcAudioManager_isAcousticEchoCancelerSupported(
      env, j_audio_manager_);
}

bool AudioManager::IsNoiseSuppressorSupported() const {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  JNIEnv* env = AttachCurrentThreadIfNeeded();
  return Java_WebRtcAudioManager_isNoiseSuppressorSupported(env,
                                                            j_audio_manager_);
}

bool AudioManager::IsStereoPlayoutSupported() const {
  return (playout_parameters_.channels() == 2);
}

bool AudioManager::IsStereoRecordSupported() const {
  return (record_parameters_.channels() == 2);
}

int AudioManager::GetDelayEstimateInMilliseconds() const {
  return audio_layer_ == AudioDeviceModule::kAndroidJavaAudio
             ? kHighLatencyModeDelayEstimateInMilliseconds
             : kLowLatencyModeDelayEstimateInMilliseconds;
}

const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
  RTC_CHECK(playout_parameters_.is_valid());
  return playout_parameters_;
}

const AudioParameters& AudioManager::GetRecordAudioParameters() {
  RTC_CHECK(record_parameters_.is_valid());
  return record_parameters_;
}

}  // namespace android_adm

}  // namespace webrtc
sdk/android/src/jni/audio_device/audio_manager.h
@ -1,95 +0,0 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_MANAGER_H_
#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_MANAGER_H_

#include <jni.h>
#include <memory>

#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/thread_checker.h"
#include "sdk/android/native_api/jni/scoped_java_ref.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"

namespace webrtc {

namespace android_adm {

// Implements support for functions in the WebRTC audio stack for Android that
// rely on the AudioManager in android.media. It also populates an
// AudioParameters structure with native audio parameters detected at
// construction. This class does not make any audio-related modifications
// unless Init() is called.
class AudioManager {
 public:
  AudioManager(JNIEnv* env,
               AudioDeviceModule::AudioLayer audio_layer,
               const JavaParamRef<jobject>& application_context);
  ~AudioManager();

  // Initializes the audio manager and stores the current audio mode.
  bool Init();
  // Reverts any settings done by Init().
  bool Close();

  // Native audio parameters stored during construction.
  const AudioParameters& GetPlayoutAudioParameters();
  const AudioParameters& GetRecordAudioParameters();

  // Returns true if the device supports built-in audio effects for AEC, AGC
  // and NS. Some devices can also be blacklisted for use in combination with
  // platform effects and these devices will return false.
  // Can currently only be used in combination with a Java based audio backend
  // for the recording side (i.e. using the android.media.AudioRecord API).
  bool IsAcousticEchoCancelerSupported() const;
  bool IsNoiseSuppressorSupported() const;

  // Returns true if the device supports (and has been configured for) stereo.
  // Call the Java API WebRtcAudioManager.setStereoOutput/Input() with true as
  // parameter to enable stereo. Default is mono in both directions and the
  // setting is set once and for all when the audio manager object is created.
  // TODO(henrika): stereo is not supported in combination with OpenSL ES.
  bool IsStereoPlayoutSupported() const;
  bool IsStereoRecordSupported() const;

  // Returns the estimated total delay of this device. Unit is in milliseconds.
  // The value is set once at construction and never changes after that.
  // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
  // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
  int GetDelayEstimateInMilliseconds() const;

 private:
  // This class is single threaded except that construction might happen on a
  // different thread.
  rtc::ThreadChecker thread_checker_;

  // Wraps the Java specific parts of the AudioManager.
  ScopedJavaGlobalRef<jobject> j_audio_manager_;

  // Contains the selected audio layer specified by the AudioLayer enumerator
  // in the AudioDeviceModule class.
  const AudioDeviceModule::AudioLayer audio_layer_;

  // Set to true by Init() and false by Close().
  bool initialized_;

  // Contains native parameters (e.g. sample rate, channel configuration). Set
  // at construction.
  AudioParameters playout_parameters_;
  AudioParameters record_parameters_;
};

}  // namespace android_adm

}  // namespace webrtc

#endif  // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_MANAGER_H_
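Note (editorial): with audio_manager.cc/.h gone, the AudioParameters that the removed class cached at construction are now built by the caller. A minimal sketch using only the AudioParameters calls already visible in the deleted constructor (reset() and is_valid()); the concrete values are illustrative:

// --- begin sketch (not part of this CL) ---
webrtc::AudioParameters playout_parameters;
const int sample_rate = 48000;         // illustrative; formerly queried via JNI
const size_t channels = 1;             // 2 if stereo output was configured
const size_t frames_per_buffer = 480;  // illustrative 10 ms buffer at 48 kHz
playout_parameters.reset(sample_rate, channels, frames_per_buffer);
RTC_CHECK(playout_parameters.is_valid());
// --- end sketch ---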
sdk/android/src/jni/audio_device/audio_record_jni.cc
@ -48,14 +48,20 @@ class ScopedHistogramTimer {

}  // namespace

// AudioRecordJni implementation.
AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
    : j_audio_record_(
          Java_WebRtcAudioRecord_Constructor(AttachCurrentThreadIfNeeded(),
                                             jni::jlongFromPointer(this))),
      audio_manager_(audio_manager),
      audio_parameters_(audio_manager->GetRecordAudioParameters()),
      total_delay_in_milliseconds_(0),
ScopedJavaLocalRef<jobject> AudioRecordJni::CreateJavaWebRtcAudioRecord(
    JNIEnv* env,
    const JavaRef<jobject>& j_context,
    const JavaRef<jobject>& j_audio_manager) {
  return Java_WebRtcAudioRecord_Constructor(env, j_context, j_audio_manager);
}

AudioRecordJni::AudioRecordJni(JNIEnv* env,
                               const AudioParameters& audio_parameters,
                               int total_delay_ms,
                               const JavaRef<jobject>& j_audio_record)
    : j_audio_record_(env, j_audio_record),
      audio_parameters_(audio_parameters),
      total_delay_ms_(total_delay_ms),
      direct_buffer_address_(nullptr),
      direct_buffer_capacity_in_bytes_(0),
      frames_per_buffer_(0),
@ -64,6 +70,8 @@ AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
      audio_device_buffer_(nullptr) {
  RTC_LOG(INFO) << "ctor";
  RTC_DCHECK(audio_parameters_.is_valid());
  Java_WebRtcAudioRecord_setNativeAudioRecord(env, j_audio_record_,
                                              jni::jlongFromPointer(this));
  // Detach from this thread since construction is allowed to happen on a
  // different thread.
  thread_checker_.DetachFromThread();
@ -171,11 +179,16 @@ void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  const size_t channels = audio_parameters_.channels();
  RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")";
  audio_device_buffer_->SetRecordingChannels(channels);
  total_delay_in_milliseconds_ =
      audio_manager_->GetDelayEstimateInMilliseconds();
  RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
  RTC_LOG(INFO) << "total_delay_in_milliseconds: "
                << total_delay_in_milliseconds_;
}

bool AudioRecordJni::IsAcousticEchoCancelerSupported() const {
  return Java_WebRtcAudioRecord_isAcousticEchoCancelerSupported(
      env_, j_audio_record_);
}

bool AudioRecordJni::IsNoiseSuppressorSupported() const {
  return Java_WebRtcAudioRecord_isNoiseSuppressorSupported(env_,
                                                           j_audio_record_);
}

int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
@ -228,7 +241,7 @@ void AudioRecordJni::DataIsRecorded(JNIEnv* env,
  // We provide one (combined) fixed delay estimate for the APM and use the
  // |playDelayMs| parameter only. Components like the AEC only see the sum
  // of |playDelayMs| and |recDelayMs|, hence the distribution does not matter.
  audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
  audio_device_buffer_->SetVQEData(total_delay_ms_, 0);
  if (audio_device_buffer_->DeliverRecordedData() == -1) {
    RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
  }
sdk/android/src/jni/audio_device/audio_record_jni.h
@ -18,7 +18,6 @@
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/thread_checker.h"
#include "sdk/android/src/jni/audio_device/audio_device_module.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"

namespace webrtc {

@ -35,16 +34,24 @@ namespace android_adm {
// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
// separately instead.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread.
// An instance can be created on any thread, but must then be used on one and
// the same thread. All public methods must also be called on the same thread. A
// thread checker will RTC_DCHECK if any method is called on an invalid thread.
//
// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
// and detach when the object goes out of scope. Additional thread checking
// guarantees that no other (possibly non attached) thread is used.
// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed.
// Additional thread checking guarantees that no other (possibly non attached)
// thread is used.
class AudioRecordJni : public AudioInput {
 public:
  explicit AudioRecordJni(AudioManager* audio_manager);
  static ScopedJavaLocalRef<jobject> CreateJavaWebRtcAudioRecord(
      JNIEnv* env,
      const JavaRef<jobject>& j_context,
      const JavaRef<jobject>& j_audio_manager);

  AudioRecordJni(JNIEnv* env,
                 const AudioParameters& audio_parameters,
                 int total_delay_ms,
                 const JavaRef<jobject>& j_webrtc_audio_record);
  ~AudioRecordJni() override;

  int32_t Init() override;
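Note (editorial): the relaxed threading contract in the comment above ("can be created on any thread, but must then be used on one and the same thread") is carried by the DetachFromThread() call visible in the new constructor. A generic sketch of that rtc::ThreadChecker pattern, independent of this CL:

// --- begin sketch (not part of this CL) ---
#include "rtc_base/checks.h"
#include "rtc_base/thread_checker.h"

class ThreadBoundExample {
 public:
  ThreadBoundExample() {
    // Allow construction on any thread; the checker re-binds to the first
    // thread that calls a public method afterwards.
    thread_checker_.DetachFromThread();
  }
  void DoWork() {
    // RTC_DCHECKs if a second, different thread calls in later.
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
  }

 private:
  rtc::ThreadChecker thread_checker_;
};
// --- end sketch ---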
@ -59,6 +66,9 @@ class AudioRecordJni : public AudioInput {

  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

  bool IsAcousticEchoCancelerSupported() const override;
  bool IsNoiseSuppressorSupported() const override;

  int32_t EnableBuiltInAEC(bool enable) override;
  int32_t EnableBuiltInAGC(bool enable) override;
  int32_t EnableBuiltInNS(bool enable) override;
@ -94,17 +104,12 @@ class AudioRecordJni : public AudioInput {
  JNIEnv* env_ = nullptr;
  ScopedJavaGlobalRef<jobject> j_audio_record_;

  // Raw pointer to the audio manager.
  const AudioManager* audio_manager_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Delay estimate of the total round-trip delay (input + output).
  // Fixed value set once in AttachAudioBuffer() and it can take one out of two
  // possible values. See audio_common.h for details.
  int total_delay_in_milliseconds_;
  const int total_delay_ms_;

  // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
  void* direct_buffer_address_;
sdk/android/src/jni/audio_device/audio_track_jni.cc
@ -18,19 +18,24 @@
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "sdk/android/generated_java_audio_device_jni/jni/WebRtcAudioTrack_jni.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"
#include "sdk/android/src/jni/jni_helpers.h"

namespace webrtc {

namespace android_adm {

// TODO(henrika): possibly extend usage of AudioManager and add it as a member.
AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
    : j_audio_track_(
          Java_WebRtcAudioTrack_Constructor(AttachCurrentThreadIfNeeded(),
                                            jni::jlongFromPointer(this))),
      audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
ScopedJavaLocalRef<jobject> AudioTrackJni::CreateJavaWebRtcAudioTrack(
    JNIEnv* env,
    const JavaRef<jobject>& j_context,
    const JavaRef<jobject>& j_audio_manager) {
  return Java_WebRtcAudioTrack_Constructor(env, j_context, j_audio_manager);
}

AudioTrackJni::AudioTrackJni(JNIEnv* env,
                             const AudioParameters& audio_parameters,
                             const JavaRef<jobject>& j_webrtc_audio_track)
    : j_audio_track_(env, j_webrtc_audio_track),
      audio_parameters_(audio_parameters),
      direct_buffer_address_(nullptr),
      direct_buffer_capacity_in_bytes_(0),
      frames_per_buffer_(0),
@ -39,6 +44,8 @@ AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
      audio_device_buffer_(nullptr) {
  RTC_LOG(INFO) << "ctor";
  RTC_DCHECK(audio_parameters_.is_valid());
  Java_WebRtcAudioTrack_setNativeAudioTrack(env, j_audio_track_,
                                            jni::jlongFromPointer(this));
  // Detach from this thread since construction is allowed to happen on a
  // different thread.
  thread_checker_.DetachFromThread();
sdk/android/src/jni/audio_device/audio_track_jni.h
@ -20,7 +20,6 @@
#include "rtc_base/thread_checker.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
#include "sdk/android/src/jni/audio_device/audio_device_module.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"

namespace webrtc {

@ -32,16 +31,23 @@ namespace android_adm {
// C++-land, but decoded audio buffers are requested on a high-priority
// thread managed by the Java class.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread.
// An instance can be created on any thread, but must then be used on one and
// the same thread. All public methods must also be called on the same thread. A
// thread checker will RTC_DCHECK if any method is called on an invalid thread.
//
// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
// and detach when the object goes out of scope. Additional thread checking
// guarantees that no other (possibly non attached) thread is used.
// This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed.
// Additional thread checking guarantees that no other (possibly non attached)
// thread is used.
class AudioTrackJni : public AudioOutput {
 public:
  explicit AudioTrackJni(AudioManager* audio_manager);
  static ScopedJavaLocalRef<jobject> CreateJavaWebRtcAudioTrack(
      JNIEnv* env,
      const JavaRef<jobject>& j_context,
      const JavaRef<jobject>& j_audio_manager);

  AudioTrackJni(JNIEnv* env,
                const AudioParameters& audio_parameters,
                const JavaRef<jobject>& j_webrtc_audio_track);
  ~AudioTrackJni() override;

  int32_t Init() override;
sdk/android/src/jni/audio_device/build_info.cc
@ -1,59 +0,0 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "sdk/android/src/jni/audio_device/build_info.h"

#include "sdk/android/generated_audio_device_base_jni/jni/BuildInfo_jni.h"
#include "sdk/android/src/jni/jni_helpers.h"

namespace webrtc {

namespace android_adm {

BuildInfo::BuildInfo() : env_(AttachCurrentThreadIfNeeded()) {}

std::string BuildInfo::GetDeviceModel() {
  thread_checker_.CalledOnValidThread();
  return JavaToStdString(env_, Java_BuildInfo_getDeviceModel(env_));
}

std::string BuildInfo::GetBrand() {
  thread_checker_.CalledOnValidThread();
  return JavaToStdString(env_, Java_BuildInfo_getBrand(env_));
}

std::string BuildInfo::GetDeviceManufacturer() {
  thread_checker_.CalledOnValidThread();
  return JavaToStdString(env_, Java_BuildInfo_getDeviceManufacturer(env_));
}

std::string BuildInfo::GetAndroidBuildId() {
  thread_checker_.CalledOnValidThread();
  return JavaToStdString(env_, Java_BuildInfo_getAndroidBuildId(env_));
}

std::string BuildInfo::GetBuildType() {
  thread_checker_.CalledOnValidThread();
  return JavaToStdString(env_, Java_BuildInfo_getBuildType(env_));
}

std::string BuildInfo::GetBuildRelease() {
  thread_checker_.CalledOnValidThread();
  return JavaToStdString(env_, Java_BuildInfo_getBuildRelease(env_));
}

SdkCode BuildInfo::GetSdkVersion() {
  thread_checker_.CalledOnValidThread();
  return static_cast<SdkCode>(Java_BuildInfo_getSdkVersion(env_));
}

}  // namespace android_adm

}  // namespace webrtc
sdk/android/src/jni/audio_device/build_info.h
@ -1,76 +0,0 @@
/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_BUILD_INFO_H_
#define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_BUILD_INFO_H_

#include <jni.h>
#include <memory>
#include <string>

#include "rtc_base/thread_checker.h"

namespace webrtc {

namespace android_adm {

// This enumeration maps to the values returned by BuildInfo::GetSdkVersion(),
// indicating the Android release associated with a given SDK version.
// See https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
// for details.
enum SdkCode {
  SDK_CODE_JELLY_BEAN = 16,      // Android 4.1
  SDK_CODE_JELLY_BEAN_MR1 = 17,  // Android 4.2
  SDK_CODE_JELLY_BEAN_MR2 = 18,  // Android 4.3
  SDK_CODE_KITKAT = 19,          // Android 4.4
  SDK_CODE_WATCH = 20,           // Android 4.4W
  SDK_CODE_LOLLIPOP = 21,        // Android 5.0
  SDK_CODE_LOLLIPOP_MR1 = 22,    // Android 5.1
  SDK_CODE_MARSHMALLOW = 23,     // Android 6.0
  SDK_CODE_N = 24,
};

// Utility class used to query the Java class (org/webrtc/audio/BuildInfo)
// for device and Android build information.
// The calling thread is attached to the JVM at construction if needed and a
// valid Java environment object is also created.
// All Get methods must be called on the creating thread. If not, the code will
// hit RTC_DCHECKs when calling JNIEnvironment::JavaToStdString().
class BuildInfo {
 public:
  BuildInfo();
  ~BuildInfo() {}

  // End-user-visible name for the end product (e.g. "Nexus 6").
  std::string GetDeviceModel();
  // Consumer-visible brand (e.g. "google").
  std::string GetBrand();
  // Manufacturer of the product/hardware (e.g. "motorola").
  std::string GetDeviceManufacturer();
  // Android build ID (e.g. LMY47D).
  std::string GetAndroidBuildId();
  // The type of build (e.g. "user" or "eng").
  std::string GetBuildType();
  // The user-visible version string (e.g. "5.1").
  std::string GetBuildRelease();
  // The user-visible SDK version of the framework (e.g. 21). See SdkCode enum
  // for translation.
  SdkCode GetSdkVersion();

 private:
  JNIEnv* const env_;
  rtc::ThreadChecker thread_checker_;
};

}  // namespace android_adm

}  // namespace webrtc

#endif  // SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_BUILD_INFO_H_
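Note (editorial): the removed BuildInfo class was a thin JNI wrapper over android.os.Build, and SdkCode mapped its integer SDK levels to Android releases. A hedged sketch of how the accessors were typically combined before this CL, for example to gate a code path on the platform SDK level:

// --- begin sketch (not part of this CL; the class is removed above) ---
webrtc::android_adm::BuildInfo build_info;
RTC_LOG(INFO) << "model: " << build_info.GetDeviceModel()
              << ", release: " << build_info.GetBuildRelease();
if (build_info.GetSdkVersion() >= webrtc::android_adm::SDK_CODE_LOLLIPOP) {
  // Android 5.0+ only code path.
}
// --- end sketch ---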
sdk/android/src/jni/audio_device/java_audio_device_module.cc
@ -0,0 +1,50 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "sdk/android/generated_java_audio_device_jni/jni/JavaAudioDeviceModule_jni.h"
#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
#include "sdk/android/src/jni/jni_helpers.h"

namespace webrtc {
namespace jni {

static jlong JNI_JavaAudioDeviceModule_CreateAudioDeviceModule(
    JNIEnv* env,
    const JavaParamRef<jclass>& j_caller,
    const JavaParamRef<jobject>& j_context,
    const JavaParamRef<jobject>& j_audio_manager,
    const JavaParamRef<jobject>& j_webrtc_audio_record,
    const JavaParamRef<jobject>& j_webrtc_audio_track,
    int sample_rate,
    jboolean j_use_stereo_input,
    jboolean j_use_stereo_output) {
  AudioParameters input_parameters;
  AudioParameters output_parameters;
  android_adm::GetAudioParameters(env, j_context, j_audio_manager, sample_rate,
                                  j_use_stereo_input, j_use_stereo_output,
                                  &input_parameters, &output_parameters);
  auto audio_input = rtc::MakeUnique<android_adm::AudioRecordJni>(
      env, input_parameters,
      android_adm::kHighLatencyModeDelayEstimateInMilliseconds,
      j_webrtc_audio_record);
  auto audio_output = rtc::MakeUnique<android_adm::AudioTrackJni>(
      env, output_parameters, j_webrtc_audio_track);
  return jlongFromPointer(
      CreateAudioDeviceModuleFromInputAndOutput(
          AudioDeviceModule::kAndroidJavaAudio, j_use_stereo_input,
          j_use_stereo_output,
          android_adm::kHighLatencyModeDelayEstimateInMilliseconds,
          std::move(audio_input), std::move(audio_output))
          .release());
}

}  // namespace jni
}  // namespace webrtc
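Note (editorial): the new file returns the ADM to Java as a raw jlong; .release() hands the Java wrapper one reference, which it must balance with a native Release() call later. A sketch of both ends of that hand-off; the helper names here are hypothetical, only jlongFromPointer() and the reinterpret_cast pattern appear in the CL itself:

// --- begin sketch (not part of this CL; helper names are hypothetical) ---
jlong WrapAdmForJava(rtc::scoped_refptr<webrtc::AudioDeviceModule> adm) {
  // release() transfers our reference to the Java side without Release()ing.
  return webrtc::jni::jlongFromPointer(adm.release());
}

void UseAdmFromJava(jlong native_adm) {
  // Constructing a scoped_refptr AddRef()s for the duration of this scope;
  // the reference held on behalf of Java stays untouched.
  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm(
      reinterpret_cast<webrtc::AudioDeviceModule*>(native_adm));
  // ... use adm ...
}
// --- end sketch ---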
sdk/android/src/jni/audio_device/opensles_player.cc
@ -20,7 +20,6 @@
#include "rtc_base/platform_thread.h"
#include "rtc_base/timeutils.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"

#define TAG "OpenSLESPlayer"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@ -43,9 +42,9 @@ namespace webrtc {

namespace android_adm {

OpenSLESPlayer::OpenSLESPlayer(
    AudioManager* audio_manager,
    const AudioParameters& audio_parameters,
    std::unique_ptr<OpenSLEngineManager> engine_manager)
    : audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
    : audio_parameters_(audio_parameters),
      audio_device_buffer_(nullptr),
      initialized_(false),
      playing_(false),
sdk/android/src/jni/audio_device/opensles_player.h
@ -23,7 +23,6 @@
#include "rtc_base/thread_checker.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
#include "sdk/android/src/jni/audio_device/audio_device_module.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"
#include "sdk/android/src/jni/audio_device/opensles_common.h"

namespace webrtc {
@ -35,11 +34,11 @@ namespace android_adm {
// Implements 16-bit mono PCM audio output support for Android using the
// C based OpenSL ES API. No calls from C/C++ to Java using JNI are done.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread. Decoded audio
// buffers are requested on a dedicated internal thread managed by the OpenSL
// ES layer.
// An instance can be created on any thread, but must then be used on one and
// the same thread. All public methods must also be called on the same thread. A
// thread checker will RTC_DCHECK if any method is called on an invalid thread.
// Decoded audio buffers are requested on a dedicated internal thread managed by
// the OpenSL ES layer.
//
// The existing design forces the user to call InitPlayout() after StopPlayout()
// to be able to call StartPlayout() again. This is in line with how the Java-
@ -60,7 +59,7 @@ class OpenSLESPlayer : public AudioOutput {
  // TODO(henrika): perhaps set this value dynamically based on OS version.
  static const int kNumOfOpenSLESBuffers = 2;

  OpenSLESPlayer(AudioManager* audio_manager,
  OpenSLESPlayer(const AudioParameters& audio_parameters,
                 std::unique_ptr<OpenSLEngineManager> engine_manager);
  ~OpenSLESPlayer() override;

@ -126,8 +125,6 @@ class OpenSLESPlayer : public AudioOutput {
  // Detached during construction of this object.
  rtc::ThreadChecker thread_checker_opensles_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
sdk/android/src/jni/audio_device/opensles_recorder.cc
@ -20,7 +20,6 @@
#include "rtc_base/platform_thread.h"
#include "rtc_base/timeutils.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"

#define TAG "OpenSLESRecorder"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@ -43,9 +42,9 @@ namespace webrtc {

namespace android_adm {

OpenSLESRecorder::OpenSLESRecorder(AudioManager* audio_manager,
OpenSLESRecorder::OpenSLESRecorder(const AudioParameters& audio_parameters,
                                   OpenSLEngineManager* engine_manager)
    : audio_parameters_(audio_manager->GetRecordAudioParameters()),
    : audio_parameters_(audio_parameters),
      audio_device_buffer_(nullptr),
      initialized_(false),
      recording_(false),
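Note (editorial): both OpenSL ES classes now receive their AudioParameters directly instead of an AudioManager*. A construction sketch under the new signatures, assuming OpenSLEngineManager (declared in the opensles headers) is default-constructible and that valid playout/record parameters already exist; note the asymmetric ownership, the recorder borrows the engine manager while the player takes it over:

// --- begin sketch (not part of this CL) ---
auto engine_manager =
    rtc::MakeUnique<webrtc::android_adm::OpenSLEngineManager>();
// The recorder only borrows the engine manager (raw pointer parameter) ...
auto recorder = rtc::MakeUnique<webrtc::android_adm::OpenSLESRecorder>(
    record_parameters, engine_manager.get());
// ... while the player takes ownership, so the player must outlive the
// recorder in this arrangement.
auto player = rtc::MakeUnique<webrtc::android_adm::OpenSLESPlayer>(
    playout_parameters, std::move(engine_manager));
// --- end sketch ---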
@ -195,6 +194,14 @@ void OpenSLESRecorder::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
  AllocateDataBuffers();
}

bool OpenSLESRecorder::IsAcousticEchoCancelerSupported() const {
  return false;
}

bool OpenSLESRecorder::IsNoiseSuppressorSupported() const {
  return false;
}

int OpenSLESRecorder::EnableBuiltInAEC(bool enable) {
  ALOGD("EnableBuiltInAEC(%d)", enable);
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
sdk/android/src/jni/audio_device/opensles_recorder.h
@ -23,7 +23,6 @@
#include "rtc_base/thread_checker.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
#include "sdk/android/src/jni/audio_device/audio_device_module.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"
#include "sdk/android/src/jni/audio_device/opensles_common.h"

namespace webrtc {
@ -35,11 +34,11 @@ namespace android_adm {
// Implements 16-bit mono PCM audio input support for Android using the
// C based OpenSL ES API. No calls from C/C++ to Java using JNI are done.
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
// will RTC_DCHECK if any method is called on an invalid thread. Recorded audio
// buffers are provided on a dedicated internal thread managed by the OpenSL
// ES layer.
// An instance can be created on any thread, but must then be used on one and
// the same thread. All public methods must also be called on the same thread. A
// thread checker will RTC_DCHECK if any method is called on an invalid thread.
// Recorded audio buffers are provided on a dedicated internal thread managed by
// the OpenSL ES layer.
//
// The existing design forces the user to call InitRecording() after
// StopRecording() to be able to call StartRecording() again. This is in line
@ -63,7 +62,7 @@ class OpenSLESRecorder : public AudioInput {
  // TODO(henrika): perhaps set this value dynamically based on OS version.
  static const int kNumOfOpenSLESBuffers = 2;

  OpenSLESRecorder(AudioManager* audio_manager,
  OpenSLESRecorder(const AudioParameters& audio_parameters,
                   OpenSLEngineManager* engine_manager);
  ~OpenSLESRecorder() override;

@ -80,6 +79,8 @@ class OpenSLESRecorder : public AudioInput {
  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;

  // TODO(henrika): add support using OpenSL ES APIs when available.
  bool IsAcousticEchoCancelerSupported() const override;
  bool IsNoiseSuppressorSupported() const override;
  int EnableBuiltInAEC(bool enable) override;
  int EnableBuiltInAGC(bool enable) override;
  int EnableBuiltInNS(bool enable) override;
@ -134,8 +135,6 @@ class OpenSLESRecorder : public AudioInput {
  // Detached during construction of this object.
  rtc::ThreadChecker thread_checker_opensles_;

  // Contains audio parameters provided to this class at construction by the
  // AudioManager.
  const AudioParameters audio_parameters_;

  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
sdk/android/src/jni/pc/peerconnectionfactory.cc
@ -26,9 +26,6 @@
#include "rtc_base/thread.h"
#include "sdk/android/generated_peerconnection_jni/jni/PeerConnectionFactory_jni.h"
#include "sdk/android/native_api/jni/java_types.h"
#include "sdk/android/src/jni/audio_device/audio_manager.h"
#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
#include "sdk/android/src/jni/jni_helpers.h"
#include "sdk/android/src/jni/pc/androidnetworkmonitor.h"
#include "sdk/android/src/jni/pc/audio.h"
@ -84,9 +81,6 @@ static char* field_trials_init_string = nullptr;
static bool factory_static_initialized = false;
static bool video_hw_acceleration_enabled = true;

static const char* kExternalAndroidAudioDeviceFieldTrialName =
    "WebRTC-ExternalAndroidAudioDevice";

void PeerConnectionFactoryNetworkThreadReady() {
  RTC_LOG(LS_INFO) << "Network thread JavaCallback";
  JNIEnv* env = AttachCurrentThreadIfNeeded();
@ -199,6 +193,7 @@ jlong CreatePeerConnectionFactoryForJava(
    JNIEnv* jni,
    const JavaParamRef<jobject>& jcontext,
    const JavaParamRef<jobject>& joptions,
    rtc::scoped_refptr<AudioDeviceModule> audio_device_module,
    const JavaParamRef<jobject>& jencoder_factory,
    const JavaParamRef<jobject>& jdecoder_factory,
    rtc::scoped_refptr<AudioProcessing> audio_processor,
@ -240,23 +235,6 @@ jlong CreatePeerConnectionFactoryForJava(
    rtc::NetworkMonitorFactory::SetFactory(network_monitor_factory);
  }

  rtc::scoped_refptr<AudioDeviceModule> adm = nullptr;
  if (field_trial::IsEnabled(kExternalAndroidAudioDeviceFieldTrialName)) {
    // Only Java AudioDeviceModule is supported as an external ADM at the
    // moment.
    const AudioDeviceModule::AudioLayer audio_layer =
        AudioDeviceModule::kAndroidJavaAudio;
    auto audio_manager =
        rtc::MakeUnique<android_adm::AudioManager>(jni, audio_layer, jcontext);
    auto audio_input =
        rtc::MakeUnique<android_adm::AudioRecordJni>(audio_manager.get());
    auto audio_output =
        rtc::MakeUnique<android_adm::AudioTrackJni>(audio_manager.get());
    adm = CreateAudioDeviceModuleFromInputAndOutput(
        audio_layer, std::move(audio_manager), std::move(audio_input),
        std::move(audio_output));
  }

  rtc::scoped_refptr<AudioMixer> audio_mixer = nullptr;
  std::unique_ptr<CallFactoryInterface> call_factory(CreateCallFactory());
  std::unique_ptr<RtcEventLogFactoryInterface> rtc_event_log_factory(
@ -274,7 +252,7 @@ jlong CreatePeerConnectionFactoryForJava(
    legacy_video_decoder_factory = CreateLegacyVideoDecoderFactory();
  }
  media_engine.reset(CreateMediaEngine(
      adm, audio_encoder_factory, audio_decoder_factory,
      audio_device_module, audio_encoder_factory, audio_decoder_factory,
      legacy_video_encoder_factory, legacy_video_decoder_factory, audio_mixer,
      audio_processor));
#endif
@ -305,7 +283,7 @@ jlong CreatePeerConnectionFactoryForJava(
  }

  media_engine.reset(CreateMediaEngine(
      adm, audio_encoder_factory, audio_decoder_factory,
      audio_device_module, audio_encoder_factory, audio_decoder_factory,
      std::move(video_encoder_factory), std::move(video_decoder_factory),
      audio_mixer, audio_processor));
}
@ -335,6 +313,7 @@ static jlong JNI_PeerConnectionFactory_CreatePeerConnectionFactory(
    const JavaParamRef<jclass>&,
    const JavaParamRef<jobject>& jcontext,
    const JavaParamRef<jobject>& joptions,
    jlong native_audio_device_module,
    const JavaParamRef<jobject>& jencoder_factory,
    const JavaParamRef<jobject>& jdecoder_factory,
    jlong native_audio_processor,
@ -345,7 +324,9 @@ static jlong JNI_PeerConnectionFactory_CreatePeerConnectionFactory(
      reinterpret_cast<FecControllerFactoryInterface*>(
          native_fec_controller_factory));
  return CreatePeerConnectionFactoryForJava(
      jni, jcontext, joptions, jencoder_factory, jdecoder_factory,
      jni, jcontext, joptions,
      reinterpret_cast<AudioDeviceModule*>(native_audio_device_module),
      jencoder_factory, jdecoder_factory,
      audio_processor ? audio_processor : CreateAudioProcessing(),
      std::move(fec_controller_factory));
}