diff --git a/sdk/android/BUILD.gn b/sdk/android/BUILD.gn
index 0ce6309c46..a17c05b323 100644
--- a/sdk/android/BUILD.gn
+++ b/sdk/android/BUILD.gn
@@ -593,10 +593,10 @@ rtc_static_library("peerconnection_jni") {
   }

   deps = [
+    ":audio_device_jni",
     ":base_jni",
     ":generated_external_classes_jni",
     ":generated_peerconnection_jni",
-    ":native_api_audio_device_module",
     ":native_api_jni",
     "../..:webrtc_common",
     "../../api:libjingle_peerconnection_api",
@@ -845,7 +845,6 @@ rtc_android_library("libjingle_peerconnection_java") {
   }

   deps = [
-    ":native_api_audio_device_module",
     "../../modules/audio_device:audio_device_java",
     "../../rtc_base:base_java",
   ]
diff --git a/sdk/android/api/org/webrtc/PeerConnectionFactory.java b/sdk/android/api/org/webrtc/PeerConnectionFactory.java
index 21729a57a6..0aa49eb834 100644
--- a/sdk/android/api/org/webrtc/PeerConnectionFactory.java
+++ b/sdk/android/api/org/webrtc/PeerConnectionFactory.java
@@ -27,7 +27,6 @@ public class PeerConnectionFactory {
   private final long nativeFactory;
   private static volatile boolean internalTracerInitialized = false;
-  private static Context applicationContext;
   private static Thread networkThread;
   private static Thread workerThread;
   private static Thread signalingThread;
@@ -181,7 +180,7 @@ public class PeerConnectionFactory {
   public static void initialize(InitializationOptions options) {
     ContextUtils.initialize(options.applicationContext);
     NativeLibrary.initialize(options.nativeLibraryLoader);
-    nativeInitializeAndroidGlobals(options.applicationContext, options.enableVideoHwAcceleration);
+    nativeInitializeAndroidGlobals(options.enableVideoHwAcceleration);
     initializeFieldTrials(options.fieldTrials);
     if (options.enableInternalTracer && !internalTracerInitialized) {
       initializeInternalTracer();
@@ -249,8 +248,8 @@ public class PeerConnectionFactory {
   public PeerConnectionFactory(
       Options options, VideoEncoderFactory encoderFactory, VideoDecoderFactory decoderFactory) {
     checkInitializeHasBeenCalled();
-    nativeFactory =
-        nativeCreatePeerConnectionFactory(options, encoderFactory, decoderFactory, 0, 0);
+    nativeFactory = nativeCreatePeerConnectionFactory(
+        ContextUtils.getApplicationContext(), options, encoderFactory, decoderFactory, 0, 0);
     if (nativeFactory == 0) {
       throw new RuntimeException("Failed to initialize PeerConnectionFactory!");
     }
@@ -267,7 +266,8 @@ public class PeerConnectionFactory {
       VideoDecoderFactory decoderFactory, AudioProcessingFactory audioProcessingFactory,
       FecControllerFactoryFactoryInterface fecControllerFactoryFactory) {
     checkInitializeHasBeenCalled();
-    nativeFactory = nativeCreatePeerConnectionFactory(options, encoderFactory, decoderFactory,
+    nativeFactory = nativeCreatePeerConnectionFactory(ContextUtils.getApplicationContext(), options,
+        encoderFactory, decoderFactory,
         audioProcessingFactory == null ? 0 : audioProcessingFactory.createNative(),
         fecControllerFactoryFactory == null ? 0 : fecControllerFactoryFactory.createNative());
     if (nativeFactory == 0) {
@@ -449,8 +449,7 @@ public class PeerConnectionFactory {
   // Must be called at least once before creating a PeerConnectionFactory
   // (for example, at application startup time).
-  private static native void nativeInitializeAndroidGlobals(
-      Context context, boolean videoHwAcceleration);
+  private static native void nativeInitializeAndroidGlobals(boolean videoHwAcceleration);
   private static native void nativeInitializeFieldTrials(String fieldTrialsInitString);
   private static native String nativeFindFieldTrialsFullName(String name);
   // Internal tracing initialization. Must be called before PeerConnectionFactory is created to
@@ -462,7 +461,7 @@ public class PeerConnectionFactory {
   private static native void nativeShutdownInternalTracer();
   private static native boolean nativeStartInternalTracingCapture(String tracingFilename);
   private static native void nativeStopInternalTracingCapture();
-  private static native long nativeCreatePeerConnectionFactory(Options options,
+  private static native long nativeCreatePeerConnectionFactory(Context context, Options options,
       VideoEncoderFactory encoderFactory, VideoDecoderFactory decoderFactory,
       long nativeAudioProcessor, long nativeFecControllerFactory);
   private static native long nativeCreatePeerConnection(long factory,
diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.cc b/sdk/android/native_api/audio_device_module/audio_device_android.cc
index 7d5a2171b4..9821f3f699 100644
--- a/sdk/android/native_api/audio_device_module/audio_device_android.cc
+++ b/sdk/android/native_api/audio_device_module/audio_device_android.cc
@@ -15,55 +15,27 @@
 #include "rtc_base/refcount.h"
 #include "rtc_base/refcountedobject.h"
 #include "system_wrappers/include/metrics.h"
-
-#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-#include "sdk/android/src/jni/audio_device/aaudio_player.h"
-#include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
-#endif
-#include "sdk/android/src/jni/audio_device/audio_device_template_android.h"
 #include "sdk/android/src/jni/audio_device/audio_manager.h"
-#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
-#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
-#include "sdk/android/src/jni/audio_device/opensles_player.h"
-#include "sdk/android/src/jni/audio_device/opensles_recorder.h"

 namespace webrtc {

-rtc::scoped_refptr<AudioDeviceModule> CreateAndroidAudioDeviceModule() {
-  RTC_LOG(INFO) << __FUNCTION__;
-  // Create an Android audio manager.
-  android_adm::AudioManager audio_manager_android;
-  // Select best possible combination of audio layers.
-  if (audio_manager_android.IsAAudioSupported()) {
 #if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-    return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-        android_adm::AAudioRecorder, android_adm::AAudioPlayer>>(
-        AudioDeviceModule::kAndroidAAudioAudio);
+rtc::scoped_refptr<AudioDeviceModule> CreateAAudioAudioDeviceModule(
+    JNIEnv* env,
+    jobject application_context) {
+  return android_adm::AudioManager::CreateAAudioAudioDeviceModule(
+      env, JavaParamRef<jobject>(application_context));
+}
 #endif
-  } else if (audio_manager_android.IsLowLatencyPlayoutSupported() &&
-             audio_manager_android.IsLowLatencyRecordSupported()) {
-    // Use OpenSL ES for both playout and recording.
-    return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-        android_adm::OpenSLESRecorder, android_adm::OpenSLESPlayer>>(
-        AudioDeviceModule::kAndroidOpenSLESAudio);
-  } else if (audio_manager_android.IsLowLatencyPlayoutSupported() &&
-             !audio_manager_android.IsLowLatencyRecordSupported()) {
-    // Use OpenSL ES for output on devices that only supports the
-    // low-latency output audio path.
-    // This combination provides low-latency output audio and at the same
-    // time support for HW AEC using the AudioRecord Java API.
-    return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-        android_adm::AudioRecordJni, android_adm::OpenSLESPlayer>>(
-        AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio);
-  } else {
-    // Use Java-based audio in both directions when low-latency output is
-    // not supported.
-    return new rtc::RefCountedObject<android_adm::AudioDeviceTemplateAndroid<
-        android_adm::AudioRecordJni, android_adm::AudioTrackJni>>(
-        AudioDeviceModule::kAndroidJavaAudio);
-  }
-  RTC_LOG(LS_ERROR) << "The requested audio layer is not supported";
-  return nullptr;
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+    JNIEnv* env,
+    jobject application_context,
+    bool use_opensles_input,
+    bool use_opensles_output) {
+  return android_adm::AudioManager::CreateAudioDeviceModule(
+      env, JavaParamRef<jobject>(application_context), use_opensles_input,
+      use_opensles_output);
 }

 }  // namespace webrtc
diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.h b/sdk/android/native_api/audio_device_module/audio_device_android.h
index 56567785a2..5f2561d805 100644
--- a/sdk/android/native_api/audio_device_module/audio_device_android.h
+++ b/sdk/android/native_api/audio_device_module/audio_device_android.h
@@ -11,11 +11,23 @@
 #ifndef SDK_ANDROID_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_ANDROID_H_
 #define SDK_ANDROID_NATIVE_API_AUDIO_DEVICE_MODULE_AUDIO_DEVICE_ANDROID_H_

+#include <jni.h>
+
 #include "modules/audio_device/include/audio_device.h"

 namespace webrtc {

-rtc::scoped_refptr<AudioDeviceModule> CreateAndroidAudioDeviceModule();
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+rtc::scoped_refptr<AudioDeviceModule> CreateAAudioAudioDeviceModule(
+    JNIEnv* env,
+    jobject application_context);
+#endif
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+    JNIEnv* env,
+    jobject application_context,
+    bool use_opensles_input,
+    bool use_opensles_output);

 }  // namespace webrtc
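The two functions above are the new native-API entry points that replace the argument-less CreateAndroidAudioDeviceModule(). A minimal usage sketch follows; the helper name and the way the JNIEnv and application Context are obtained are illustrative assumptions, not part of this patch:

    // Sketch: creating the Android ADM from native code with the new API.
    #include "sdk/android/native_api/audio_device_module/audio_device_android.h"

    rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateAdmForApp(
        JNIEnv* env, jobject application_context) {
      // false/false selects the Java-based AudioRecord/AudioTrack backends in
      // both directions; pass true to opt into the low-latency OpenSL ES paths.
      return webrtc::CreateAudioDeviceModule(env, application_context,
                                             /*use_opensles_input=*/false,
                                             /*use_opensles_output=*/false);
    }

Callers that do not want to pick the audio layers themselves can instead use the blacklist-aware overload of android_adm::AudioManager::CreateAudioDeviceModule() added further down in audio_manager.cc.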
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
index 64521cdfba..27e4356012 100644
--- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
@@ -20,10 +20,8 @@ import android.media.AudioTrack;
 import android.os.Build;
 import java.util.Timer;
 import java.util.TimerTask;
-import org.webrtc.ContextUtils;
 import org.webrtc.Logging;
 import org.webrtc.CalledByNative;
-import org.webrtc.NativeClassQualifiedName;

 // WebRtcAudioManager handles tasks that use android.media.AudioManager.
 // At construction, storeAudioParameters() is called and it retrieves
@@ -39,10 +37,6 @@ class WebRtcAudioManager {

   private static final String TAG = "WebRtcAudioManager";

-  // TODO(bugs.webrtc.org/8914): disabled by default until AAudio support has
-  // been completed. Goal is to always return false on Android O MR1 and higher.
-  private static final boolean blacklistDeviceForAAudioUsage = true;
-
   // Use mono as default for both audio directions.
   private static boolean useStereoOutput = false;
   private static boolean useStereoInput = false;
@@ -79,13 +73,15 @@ class WebRtcAudioManager {
   // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
   @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized boolean getStereoOutput() {
+  @CalledByNative
+  public synchronized boolean getStereoOutput() {
     return useStereoOutput;
   }

   // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
   @SuppressWarnings("NoSynchronizedMethodCheck")
-  public static synchronized boolean getStereoInput() {
+  @CalledByNative
+  public synchronized boolean getStereoInput() {
     return useStereoInput;
   }
@@ -150,42 +146,34 @@ class WebRtcAudioManager {
     }
   }

-  private final long nativeAudioManager;
   private final AudioManager audioManager;
-
-  private boolean initialized = false;
-  private int nativeSampleRate;
-  private int nativeChannels;
-
-  private boolean hardwareAEC;
-  private boolean hardwareAGC;
-  private boolean hardwareNS;
-  private boolean lowLatencyOutput;
-  private boolean lowLatencyInput;
-  private boolean proAudio;
-  private boolean aAudio;
-  private int sampleRate;
-  private int outputChannels;
-  private int inputChannels;
-  private int outputBufferSize;
-  private int inputBufferSize;
-
+  private final int sampleRate;
+  private final int outputBufferSize;
+  private final int inputBufferSize;
   private final VolumeLogger volumeLogger;

+  private boolean initialized = false;
+
   @CalledByNative
-  WebRtcAudioManager(long nativeAudioManager) {
+  WebRtcAudioManager(Context context) {
     Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
-    this.nativeAudioManager = nativeAudioManager;
-    audioManager =
-        (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
+    this.audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
     if (DEBUG) {
       WebRtcAudioUtils.logDeviceInfo(TAG);
     }
-    volumeLogger = new VolumeLogger(audioManager);
-    storeAudioParameters();
-    nativeCacheAudioParameters(nativeAudioManager, sampleRate, outputChannels, inputChannels,
-        hardwareAEC, hardwareAGC, hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, aAudio,
-        outputBufferSize, inputBufferSize);
+    this.volumeLogger = new VolumeLogger(audioManager);
+
+    final int outputChannels = getStereoOutput() ? 2 : 1;
+    final int inputChannels = getStereoInput() ? 2 : 1;
+
+    this.sampleRate = getNativeOutputSampleRate();
+    this.outputBufferSize = isLowLatencyOutputSupported(context)
+        ? getLowLatencyOutputFramesPerBuffer()
+        : getMinOutputFrameSize(sampleRate, outputChannels);
+    this.inputBufferSize = isLowLatencyInputSupported(context)
+        ? getLowLatencyInputFramesPerBuffer()
+        : getMinInputFrameSize(sampleRate, inputChannels);
+
     WebRtcAudioUtils.logAudioState(TAG);
   }
@@ -216,7 +204,7 @@ class WebRtcAudioManager {
   }

   @CalledByNative
-  private boolean isDeviceBlacklistedForOpenSLESUsage() {
+  private static boolean isDeviceBlacklistedForOpenSLESUsage() {
     boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
         ? blacklistDeviceForOpenSLESUsage
         : WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
@@ -226,64 +214,22 @@ class WebRtcAudioManager {
     return blacklisted;
   }

-  private void storeAudioParameters() {
-    outputChannels = getStereoOutput() ? 2 : 1;
-    inputChannels = getStereoInput() ? 2 : 1;
-    sampleRate = getNativeOutputSampleRate();
-    hardwareAEC = isAcousticEchoCancelerSupported();
-    // TODO(henrika): use of hardware AGC is no longer supported. Currently
-    // hardcoded to false. To be removed.
-    hardwareAGC = false;
-    hardwareNS = isNoiseSuppressorSupported();
-    lowLatencyOutput = isLowLatencyOutputSupported();
-    lowLatencyInput = isLowLatencyInputSupported();
-    proAudio = isProAudioSupported();
-    aAudio = isAAudioSupported();
-    outputBufferSize = lowLatencyOutput ? getLowLatencyOutputFramesPerBuffer()
-                                        : getMinOutputFrameSize(sampleRate, outputChannels);
-    inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
-                                      : getMinInputFrameSize(sampleRate, inputChannels);
-  }
-
-  // Gets the current earpiece state.
-  private boolean hasEarpiece() {
-    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
-        PackageManager.FEATURE_TELEPHONY);
-  }
-
   // Returns true if low-latency audio output is supported.
-  private boolean isLowLatencyOutputSupported() {
-    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
-        PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+  @CalledByNative
+  public static boolean isLowLatencyOutputSupported(Context context) {
+    return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY);
   }

   // Returns true if low-latency audio input is supported.
   // TODO(henrika): remove the hardcoded false return value when OpenSL ES
   // input performance has been evaluated and tested more.
-  public boolean isLowLatencyInputSupported() {
+  @CalledByNative
+  public static boolean isLowLatencyInputSupported(Context context) {
     // TODO(henrika): investigate if some sort of device list is needed here
     // as well. The NDK doc states that: "As of API level 21, lower latency
     // audio input is supported on select devices. To take advantage of this
     // feature, first confirm that lower latency output is available".
-    return WebRtcAudioUtils.runningOnLollipopOrHigher() && isLowLatencyOutputSupported();
-  }
-
-  // Returns true if the device has professional audio level of functionality
-  // and therefore supports the lowest possible round-trip latency.
-  @TargetApi(23)
-  private boolean isProAudioSupported() {
-    return WebRtcAudioUtils.runningOnMarshmallowOrHigher()
-        && ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
-               PackageManager.FEATURE_AUDIO_PRO);
-  }
-
-  // AAudio is supported on Android Oreo MR1 (API 27) and higher.
-  // TODO(bugs.webrtc.org/8914): currently disabled by default.
-  private boolean isAAudioSupported() {
-    if (blacklistDeviceForAAudioUsage) {
-      Logging.w(TAG, "AAudio support is currently disabled on all devices!");
-    }
-    return !blacklistDeviceForAAudioUsage && WebRtcAudioUtils.runningOnOreoMR1OrHigher();
+    return WebRtcAudioUtils.runningOnLollipopOrHigher() && isLowLatencyOutputSupported(context);
   }

   // Returns the native output sample rate for this device's output stream.
@@ -314,6 +260,11 @@ class WebRtcAudioManager {
     return sampleRateHz;
   }

+  @CalledByNative
+  int getSampleRate() {
+    return sampleRate;
+  }
+
   @TargetApi(17)
   private int getSampleRateOnJellyBeanMR10OrHigher() {
     String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
@@ -324,7 +275,6 @@ class WebRtcAudioManager {
   // Returns the native output buffer size for low-latency output streams.
   @TargetApi(17)
   private int getLowLatencyOutputFramesPerBuffer() {
-    assertTrue(isLowLatencyOutputSupported());
     if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
       return DEFAULT_FRAME_PER_BUFFER;
     }
@@ -339,13 +289,26 @@ class WebRtcAudioManager {
   // 2) explicit use (override) of a WebRTC based version must not be set,
   // 3) the device must not be blacklisted for use of the effect, and
   // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
-  private static boolean isAcousticEchoCancelerSupported() {
+  @CalledByNative
+  boolean isAcousticEchoCancelerSupported() {
     return WebRtcAudioEffects.canUseAcousticEchoCanceler();
   }
-  private static boolean isNoiseSuppressorSupported() {
+
+  @CalledByNative
+  boolean isNoiseSuppressorSupported() {
     return WebRtcAudioEffects.canUseNoiseSuppressor();
   }

+  @CalledByNative
+  int getOutputBufferSize() {
+    return outputBufferSize;
+  }
+
+  @CalledByNative
+  int getInputBufferSize() {
+    return inputBufferSize;
+  }
+
   // Returns the minimum output buffer size for Java based audio (AudioTrack).
   // This size can also be used for OpenSL ES implementations on devices that
   // lack support for low-latency output.
@@ -360,7 +323,6 @@ class WebRtcAudioManager {

   // Returns the native input buffer size for input streams.
   private int getLowLatencyInputFramesPerBuffer() {
-    assertTrue(isLowLatencyInputSupported());
     return getLowLatencyOutputFramesPerBuffer();
   }

@@ -375,17 +337,4 @@ class WebRtcAudioManager {
         sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
         / bytesPerFrame;
   }
-
-  // Helper method which throws an exception when an assertion has failed.
-  private static void assertTrue(boolean condition) {
-    if (!condition) {
-      throw new AssertionError("Expected condition to be true");
-    }
-  }
-
-  @NativeClassQualifiedName("webrtc::android_adm::AudioManager")
-  private native void nativeCacheAudioParameters(long nativeAudioManager, int sampleRate,
-      int outputChannels, int inputChannels, boolean hardwareAEC, boolean hardwareAGC,
-      boolean hardwareNS, boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio,
-      boolean aAudio, int outputBufferSize, int inputBufferSize);
 }
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java
index 73fe32b19f..0693922f70 100644
--- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioUtils.java
@@ -130,11 +130,6 @@ final class WebRtcAudioUtils {
   public static boolean isNoiseSuppressorSupported() {
     return WebRtcAudioEffects.canUseNoiseSuppressor();
   }
-  // TODO(henrika): deprecated; remove when no longer used by any client.
-  public static boolean isAutomaticGainControlSupported() {
-    // Always return false here to avoid trying to use any built-in AGC.
-    return false;
-  }

   // Call this method if the default handling of querying the native sample
   // rate shall be overridden. Can be useful on some devices where the
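With nativeCacheAudioParameters() gone, WebRtcAudioManager is now a passive parameter provider: the C++ side pulls each value through stubs that the JNI annotation processor generates from the @CalledByNative methods above. A hedged sketch of that direction of the call, mirroring what the new native AudioManager constructor does (GetCachedSampleRate is a hypothetical helper used only for illustration):

    // Sketch: reading one cached Java-side parameter from C++. The
    // Java_WebRtcAudioManager_* stub below is generated from the
    // @CalledByNative getSampleRate() getter.
    #include "sdk/android/generated_audio_jni/jni/WebRtcAudioManager_jni.h"

    namespace webrtc {
    namespace android_adm {

    int GetCachedSampleRate(JNIEnv* env,
                            const ScopedJavaGlobalRef<jobject>& j_audio_manager) {
      return Java_WebRtcAudioManager_getSampleRate(env, j_audio_manager);
    }

    }  // namespace android_adm
    }  // namespace webrtc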
diff --git a/sdk/android/src/jni/audio_device/audio_device_template_android.h b/sdk/android/src/jni/audio_device/audio_device_template_android.h
index 9e7c42e4a8..b6e8e9ac23 100644
--- a/sdk/android/src/jni/audio_device/audio_device_template_android.h
+++ b/sdk/android/src/jni/audio_device/audio_device_template_android.h
@@ -62,8 +62,12 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     NUM_STATUSES = 4
   };

-  explicit AudioDeviceTemplateAndroid(AudioDeviceModule::AudioLayer audio_layer)
-      : audio_layer_(audio_layer), initialized_(false) {
+  AudioDeviceTemplateAndroid(JNIEnv* env,
+                             const JavaParamRef<jobject>& application_context,
+                             AudioDeviceModule::AudioLayer audio_layer)
+      : audio_layer_(audio_layer),
+        audio_manager_(env, audio_layer, application_context),
+        initialized_(false) {
     RTC_LOG(INFO) << __FUNCTION__;
     thread_checker_.DetachFromThread();
   }
@@ -85,24 +89,22 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t Init() override {
     RTC_LOG(INFO) << __FUNCTION__;
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    audio_manager_ = rtc::MakeUnique<AudioManager>();
-    output_ = rtc::MakeUnique<OutputType>(audio_manager_.get());
-    input_ = rtc::MakeUnique<InputType>(audio_manager_.get());
-    audio_manager_->SetActiveAudioLayer(audio_layer_);
+    output_ = rtc::MakeUnique<OutputType>(&audio_manager_);
+    input_ = rtc::MakeUnique<InputType>(&audio_manager_);
     audio_device_buffer_ = rtc::MakeUnique<AudioDeviceBuffer>();
     AttachAudioBuffer();
     if (initialized_) {
       return 0;
     }
     InitStatus status;
-    if (!audio_manager_->Init()) {
+    if (!audio_manager_.Init()) {
       status = InitStatus::OTHER_ERROR;
     } else if (output_->Init() != 0) {
-      audio_manager_->Close();
+      audio_manager_.Close();
       status = InitStatus::PLAYOUT_ERROR;
     } else if (input_->Init() != 0) {
       output_->Terminate();
-      audio_manager_->Close();
+      audio_manager_.Close();
       status = InitStatus::RECORDING_ERROR;
     } else {
       initialized_ = true;
@@ -125,7 +127,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
     int32_t err = input_->Terminate();
     err |= output_->Terminate();
-    err |= !audio_manager_->Close();
+    err |= !audio_manager_.Close();
     initialized_ = false;
     RTC_DCHECK_EQ(err, 0);
     return err;
   }
@@ -248,7 +250,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
       return 0;
     }
     audio_device_buffer_->StartPlayout();
-    if (!audio_manager_->IsCommunicationModeEnabled()) {
+    if (!audio_manager_.IsCommunicationModeEnabled()) {
       RTC_LOG(WARNING)
           << "The application should use MODE_IN_COMMUNICATION audio mode!";
     }
@@ -286,7 +288,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     if (Recording()) {
       return 0;
     }
-    if (!audio_manager_->IsCommunicationModeEnabled()) {
+    if (!audio_manager_.IsCommunicationModeEnabled()) {
      RTC_LOG(WARNING)
          << "The application should use MODE_IN_COMMUNICATION audio mode!";
     }
@@ -471,7 +473,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t StereoPlayoutIsAvailable(bool* available) const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
-    *available = audio_manager_->IsStereoPlayoutSupported();
+    *available = audio_manager_.IsStereoPlayoutSupported();
     RTC_LOG(INFO) << "output: " << *available;
     return 0;
   }
@@ -483,7 +485,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
       RTC_LOG(WARNING) << "recording in stereo is not supported";
       return -1;
     }
-    bool available = audio_manager_->IsStereoPlayoutSupported();
+    bool available = audio_manager_.IsStereoPlayoutSupported();
     // Android does not support changes between mono and stereo on the fly.
     // Instead, the native audio layer is configured via the audio manager
     // to either support mono or stereo. It is allowed to call this method
@@ -503,7 +505,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t StereoPlayout(bool* enabled) const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
-    *enabled = audio_manager_->IsStereoPlayoutSupported();
+    *enabled = audio_manager_.IsStereoPlayoutSupported();
     RTC_LOG(INFO) << "output: " << *enabled;
     return 0;
   }
@@ -512,7 +514,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
     bool isAvailable = false;
-    if (audio_manager_->IsStereoRecordSupported() == -1) {
+    if (audio_manager_.IsStereoRecordSupported() == -1) {
       return -1;
     }
     *available = isAvailable;
@@ -527,7 +529,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
       RTC_LOG(WARNING) << "recording in stereo is not supported";
       return -1;
     }
-    bool available = audio_manager_->IsStereoRecordSupported();
+    bool available = audio_manager_.IsStereoRecordSupported();
     // Android does not support changes between mono and stereo on the fly.
     // Instead, the native audio layer is configured via the audio manager
     // to either support mono or stereo. It is allowed to call this method
@@ -547,7 +549,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t StereoRecording(bool* enabled) const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized_();
-    *enabled = audio_manager_->IsStereoRecordSupported();
+    *enabled = audio_manager_.IsStereoRecordSupported();
     RTC_LOG(INFO) << "output: " << *enabled;
     return 0;
   }
@@ -555,7 +557,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   int32_t PlayoutDelay(uint16_t* delay_ms) const override {
     CHECKinitialized_();
     // Best guess we can do is to use half of the estimated total delay.
-    *delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
+    *delay_ms = audio_manager_.GetDelayEstimateInMilliseconds() / 2;
     RTC_DCHECK_GT(*delay_ms, 0);
     return 0;
   }
@@ -575,7 +577,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   bool BuiltInAECIsAvailable() const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized__BOOL();
-    bool isAvailable = audio_manager_->IsAcousticEchoCancelerSupported();
+    bool isAvailable = audio_manager_.IsAcousticEchoCancelerSupported();
     RTC_LOG(INFO) << "output: " << isAvailable;
     return isAvailable;
   }
@@ -587,7 +589,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   bool BuiltInAGCIsAvailable() const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized__BOOL();
-    bool isAvailable = audio_manager_->IsAutomaticGainControlSupported();
+    bool isAvailable = false;
     RTC_LOG(INFO) << "output: " << isAvailable;
     return isAvailable;
   }
@@ -599,7 +601,7 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
   bool BuiltInNSIsAvailable() const override {
     RTC_LOG(INFO) << __FUNCTION__;
     CHECKinitialized__BOOL();
-    bool isAvailable = audio_manager_->IsNoiseSuppressorSupported();
+    bool isAvailable = audio_manager_.IsNoiseSuppressorSupported();
     RTC_LOG(INFO) << "output: " << isAvailable;
     return isAvailable;
   }
@@ -634,11 +636,6 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     return result;
   }

-  AudioDeviceModule::AudioLayer PlatformAudioLayer() const {
-    RTC_LOG(INFO) << __FUNCTION__;
-    return audio_layer_;
-  }
-
   int32_t AttachAudioBuffer() {
     RTC_LOG(INFO) << __FUNCTION__;
     output_->AttachAudioBuffer(audio_device_buffer_.get());
@@ -646,16 +643,12 @@ class AudioDeviceTemplateAndroid : public AudioDeviceModule {
     return 0;
   }

-  AudioDeviceBuffer* GetAudioDeviceBuffer() {
-    return audio_device_buffer_.get();
-  }
-
  private:
   rtc::ThreadChecker thread_checker_;

-  AudioDeviceModule::AudioLayer audio_layer_;
+  const AudioDeviceModule::AudioLayer audio_layer_;

-  std::unique_ptr<AudioManager> audio_manager_;
+  AudioManager audio_manager_;
   std::unique_ptr<OutputType> output_;
   std::unique_ptr<InputType> input_;
   std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
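Because AudioManager is now a by-value member constructed together with the module, a fully wired ADM needs the JNIEnv and application Context up front. A sketch of constructing the template directly for the pure-Java audio path; CreateJavaAudioAdm is a hypothetical helper, and in practice the factory functions in audio_manager.cc (next file) pick the input/output types for you:

    // Sketch: direct construction of the Java-audio flavour of the template.
    using webrtc::android_adm::AudioDeviceTemplateAndroid;
    using webrtc::android_adm::AudioRecordJni;
    using webrtc::android_adm::AudioTrackJni;

    rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateJavaAudioAdm(
        JNIEnv* env, jobject application_context) {
      rtc::scoped_refptr<webrtc::AudioDeviceModule> adm(
          new rtc::RefCountedObject<
              AudioDeviceTemplateAndroid<AudioRecordJni, AudioTrackJni>>(
              env, webrtc::JavaParamRef<jobject>(application_context),
              webrtc::AudioDeviceModule::kAndroidJavaAudio));
      adm->Init();  // Initializes the audio manager and both directions.
      return adm;
    }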
diff --git a/sdk/android/src/jni/audio_device/audio_manager.cc b/sdk/android/src/jni/audio_device/audio_manager.cc
index 3ed3fd10fb..6b119ac8d0 100644
--- a/sdk/android/src/jni/audio_device/audio_manager.cc
+++ b/sdk/android/src/jni/audio_device/audio_manager.cc
@@ -16,61 +16,111 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/platform_thread.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+
 #include "sdk/android/generated_audio_jni/jni/WebRtcAudioManager_jni.h"
 #include "sdk/android/src/jni/audio_device/audio_common.h"
 #include "sdk/android/src/jni/jni_helpers.h"

+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#include "sdk/android/src/jni/audio_device/aaudio_player.h"
+#include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
+#endif
+#include "sdk/android/src/jni/audio_device/audio_device_template_android.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
+#include "sdk/android/src/jni/audio_device/audio_record_jni.h"
+#include "sdk/android/src/jni/audio_device/audio_track_jni.h"
+#include "sdk/android/src/jni/audio_device/opensles_player.h"
+#include "sdk/android/src/jni/audio_device/opensles_recorder.h"
+
 namespace webrtc {

 namespace android_adm {

-// AudioManager::JavaAudioManager implementation
-AudioManager::JavaAudioManager::JavaAudioManager(
-    const ScopedJavaLocalRef<jobject>& audio_manager)
-    : env_(audio_manager.env()), audio_manager_(audio_manager) {
-  RTC_LOG(INFO) << "JavaAudioManager::ctor";
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+rtc::scoped_refptr<AudioDeviceModule>
+AudioManager::CreateAAudioAudioDeviceModule(
+    JNIEnv* env,
+    const JavaParamRef<jobject>& application_context) {
+  RTC_LOG(INFO) << __FUNCTION__;
+  return new rtc::RefCountedObject<
+      AudioDeviceTemplateAndroid<AAudioRecorder, AAudioPlayer>>(
+      env, AudioDeviceModule::kAndroidAAudioAudio);
+}
+#endif
+
+rtc::scoped_refptr<AudioDeviceModule> AudioManager::CreateAudioDeviceModule(
+    JNIEnv* env,
+    const JavaParamRef<jobject>& application_context) {
+  const bool use_opensles_output =
+      !Java_WebRtcAudioManager_isDeviceBlacklistedForOpenSLESUsage(env) &&
+      Java_WebRtcAudioManager_isLowLatencyOutputSupported(env,
+                                                          application_context);
+  const bool use_opensles_input =
+      use_opensles_output && Java_WebRtcAudioManager_isLowLatencyInputSupported(
+                                 env, application_context);
+  return CreateAudioDeviceModule(env, application_context, use_opensles_input,
+                                 use_opensles_output);
 }

-AudioManager::JavaAudioManager::~JavaAudioManager() {
-  RTC_LOG(INFO) << "JavaAudioManager::~dtor";
-}
+rtc::scoped_refptr<AudioDeviceModule> AudioManager::CreateAudioDeviceModule(
+    JNIEnv* env,
+    const JavaParamRef<jobject>& application_context,
+    bool use_opensles_input,
+    bool use_opensles_output) {
+  RTC_LOG(INFO) << __FUNCTION__;

-bool AudioManager::JavaAudioManager::Init() {
-  thread_checker_.CalledOnValidThread();
-  return Java_WebRtcAudioManager_init(env_, audio_manager_);
-}
-
-void AudioManager::JavaAudioManager::Close() {
-  thread_checker_.CalledOnValidThread();
-  Java_WebRtcAudioManager_dispose(env_, audio_manager_);
-}
-
-bool AudioManager::JavaAudioManager::IsCommunicationModeEnabled() {
-  thread_checker_.CalledOnValidThread();
-  return Java_WebRtcAudioManager_isCommunicationModeEnabled(env_,
-                                                            audio_manager_);
-}
-
-bool AudioManager::JavaAudioManager::IsDeviceBlacklistedForOpenSLESUsage() {
-  thread_checker_.CalledOnValidThread();
-  return Java_WebRtcAudioManager_isDeviceBlacklistedForOpenSLESUsage(
-      env_, audio_manager_);
+  if (use_opensles_output) {
+    if (use_opensles_input) {
+      // Use OpenSL ES for both playout and recording.
+      return new rtc::RefCountedObject<
+          AudioDeviceTemplateAndroid<OpenSLESRecorder, OpenSLESPlayer>>(
+          env, application_context, AudioDeviceModule::kAndroidOpenSLESAudio);
+    } else {
+      // Use OpenSL ES for output and the AudioRecord API for input. This
+      // combination provides low-latency output audio and at the same
+      // time support for HW AEC using the AudioRecord Java API.
+      return new rtc::RefCountedObject<
+          AudioDeviceTemplateAndroid<AudioRecordJni, OpenSLESPlayer>>(
+          env, application_context,
+          AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio);
+    }
+  } else {
+    RTC_DCHECK(!use_opensles_input)
+        << "Combination of OpenSLES input and Java-based output not supported";
+    // Use Java-based audio in both directions.
+    return new rtc::RefCountedObject<
+        AudioDeviceTemplateAndroid<AudioRecordJni, AudioTrackJni>>(
+        env, application_context, AudioDeviceModule::kAndroidJavaAudio);
+  }
 }

 // AudioManager implementation
-AudioManager::AudioManager()
-    : audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
-      initialized_(false),
-      hardware_aec_(false),
-      hardware_agc_(false),
-      hardware_ns_(false),
-      low_latency_playout_(false),
-      low_latency_record_(false),
-      delay_estimate_in_milliseconds_(0) {
+AudioManager::AudioManager(JNIEnv* env,
+                           AudioDeviceModule::AudioLayer audio_layer,
+                           const JavaParamRef<jobject>& application_context)
+    : j_audio_manager_(
+          Java_WebRtcAudioManager_Constructor(env, application_context)),
+      audio_layer_(audio_layer),
+      initialized_(false) {
   RTC_LOG(INFO) << "ctor";
-  j_audio_manager_.reset(
-      new JavaAudioManager(Java_WebRtcAudioManager_Constructor(
-          AttachCurrentThreadIfNeeded(), jni::jlongFromPointer(this))));
+  const int sample_rate =
+      Java_WebRtcAudioManager_getSampleRate(env, j_audio_manager_);
+  const size_t output_channels =
+      Java_WebRtcAudioManager_getStereoOutput(env, j_audio_manager_) ? 2 : 1;
+  const size_t input_channels =
+      Java_WebRtcAudioManager_getStereoInput(env, j_audio_manager_) ? 2 : 1;
+  const size_t output_buffer_size =
+      Java_WebRtcAudioManager_getOutputBufferSize(env, j_audio_manager_);
+  const size_t input_buffer_size =
+      Java_WebRtcAudioManager_getInputBufferSize(env, j_audio_manager_);
+  playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
+                            static_cast<size_t>(output_buffer_size));
+  record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
+                           static_cast<size_t>(input_buffer_size));
+  thread_checker_.DetachFromThread();
 }

 AudioManager::~AudioManager() {
@@ -79,25 +129,6 @@ AudioManager::~AudioManager() {
   Close();
 }

-void AudioManager::SetActiveAudioLayer(
-    AudioDeviceModule::AudioLayer audio_layer) {
-  RTC_LOG(INFO) << "SetActiveAudioLayer: " << audio_layer;
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  RTC_DCHECK(!initialized_);
-  // Store the currently utilized audio layer.
-  audio_layer_ = audio_layer;
-  // The delay estimate can take one of two fixed values depending on if the
-  // device supports low-latency output or not. However, it is also possible
-  // that the user explicitly selects the high-latency audio path, hence we use
-  // the selected |audio_layer| here to set the delay estimate.
-  delay_estimate_in_milliseconds_ =
-      (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
-          ? kHighLatencyModeDelayEstimateInMilliseconds
-          : kLowLatencyModeDelayEstimateInMilliseconds;
-  RTC_LOG(INFO) << "delay_estimate_in_milliseconds: "
-                << delay_estimate_in_milliseconds_;
-}
-
 SLObjectItf AudioManager::GetOpenSLEngine() {
   RTC_LOG(INFO) << "GetOpenSLEngine";
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
@@ -144,7 +175,8 @@ bool AudioManager::Init() {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!initialized_);
   RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
-  if (!j_audio_manager_->Init()) {
+  JNIEnv* env = AttachCurrentThreadIfNeeded();
+  if (!Java_WebRtcAudioManager_init(env, j_audio_manager_)) {
     RTC_LOG(LS_ERROR) << "Init() failed";
     return false;
   }
@@ -157,60 +189,31 @@ bool AudioManager::Close() {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!initialized_)
     return true;
-  j_audio_manager_->Close();
+  JNIEnv* env = AttachCurrentThreadIfNeeded();
+  Java_WebRtcAudioManager_dispose(env, j_audio_manager_);
   initialized_ = false;
   return true;
 }

 bool AudioManager::IsCommunicationModeEnabled() const {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  return j_audio_manager_->IsCommunicationModeEnabled();
+  JNIEnv* env = AttachCurrentThreadIfNeeded();
+  return Java_WebRtcAudioManager_isCommunicationModeEnabled(env,
+                                                            j_audio_manager_);
 }

 bool AudioManager::IsAcousticEchoCancelerSupported() const {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  return hardware_aec_;
-}
-
-bool AudioManager::IsAutomaticGainControlSupported() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  return hardware_agc_;
+  JNIEnv* env = AttachCurrentThreadIfNeeded();
+  return Java_WebRtcAudioManager_isAcousticEchoCancelerSupported(
+      env, j_audio_manager_);
 }

 bool AudioManager::IsNoiseSuppressorSupported() const {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  return hardware_ns_;
-}
-
-bool AudioManager::IsLowLatencyPlayoutSupported() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  // Some devices are blacklisted for usage of OpenSL ES even if they report
-  // that low-latency playout is supported. See b/21485703 for details.
-  return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
-             ? false
-             : low_latency_playout_;
-}
-
-bool AudioManager::IsLowLatencyRecordSupported() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  return low_latency_record_;
-}
-
-bool AudioManager::IsProAudioSupported() const {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  // TODO(henrika): return the state independently of if OpenSL ES is
-  // blacklisted or not for now. We could use the same approach as in
-  // IsLowLatencyPlayoutSupported() but I can't see the need for it yet.
-  return pro_audio_;
-}
-
-// TODO(henrika): improve comments...
-bool AudioManager::IsAAudioSupported() const {
-#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-  return a_audio_;
-#else
-  return false;
-#endif
+  JNIEnv* env = AttachCurrentThreadIfNeeded();
+  return Java_WebRtcAudioManager_isNoiseSuppressorSupported(env,
+                                                            j_audio_manager_);
 }

 bool AudioManager::IsStereoPlayoutSupported() const {
@@ -224,49 +227,9 @@ bool AudioManager::IsStereoRecordSupported() const {
 }

 int AudioManager::GetDelayEstimateInMilliseconds() const {
-  return delay_estimate_in_milliseconds_;
-}
-
-void AudioManager::CacheAudioParameters(JNIEnv* env,
-                                        const JavaParamRef<jobject>& j_caller,
-                                        jint sample_rate,
-                                        jint output_channels,
-                                        jint input_channels,
-                                        jboolean hardware_aec,
-                                        jboolean hardware_agc,
-                                        jboolean hardware_ns,
-                                        jboolean low_latency_output,
-                                        jboolean low_latency_input,
-                                        jboolean pro_audio,
-                                        jboolean a_audio,
-                                        jint output_buffer_size,
-                                        jint input_buffer_size) {
-  RTC_LOG(INFO)
-      << "OnCacheAudioParameters: "
-      << "hardware_aec: " << static_cast<bool>(hardware_aec)
-      << ", hardware_agc: " << static_cast<bool>(hardware_agc)
-      << ", hardware_ns: " << static_cast<bool>(hardware_ns)
-      << ", low_latency_output: " << static_cast<bool>(low_latency_output)
-      << ", low_latency_input: " << static_cast<bool>(low_latency_input)
-      << ", pro_audio: " << static_cast<bool>(pro_audio)
-      << ", a_audio: " << static_cast<bool>(a_audio)
-      << ", sample_rate: " << static_cast<int>(sample_rate)
-      << ", output_channels: " << static_cast<int>(output_channels)
-      << ", input_channels: " << static_cast<int>(input_channels)
-      << ", output_buffer_size: " << static_cast<int>(output_buffer_size)
-      << ", input_buffer_size: " << static_cast<int>(input_buffer_size);
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  hardware_aec_ = hardware_aec;
-  hardware_agc_ = hardware_agc;
-  hardware_ns_ = hardware_ns;
-  low_latency_playout_ = low_latency_output;
-  low_latency_record_ = low_latency_input;
-  pro_audio_ = pro_audio;
-  a_audio_ = a_audio;
-  playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
-                            static_cast<size_t>(output_buffer_size));
-  record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
-                           static_cast<size_t>(input_buffer_size));
+  return audio_layer_ == AudioDeviceModule::kAndroidJavaAudio
+             ? kHighLatencyModeDelayEstimateInMilliseconds
+             : kLowLatencyModeDelayEstimateInMilliseconds;
 }

 const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
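For reference, the two booleans in the factory above map onto the old audio-layer selection from audio_device_android.cc as follows (derived directly from the branches in CreateAudioDeviceModule()):

    // use_opensles_output | use_opensles_input | resulting AudioLayer
    // --------------------+--------------------+----------------------------------------
    // true                | true               | kAndroidOpenSLESAudio
    // true                | false              | kAndroidJavaInputAndOpenSLESOutputAudio
    // false               | false              | kAndroidJavaAudio
    // false               | true               | disallowed (RTC_DCHECK fails)

GetDelayEstimateInMilliseconds() then derives the high- or low-latency delay estimate from the chosen layer instead of caching it in SetActiveAudioLayer().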
diff --git a/sdk/android/src/jni/audio_device/audio_manager.h b/sdk/android/src/jni/audio_device/audio_manager.h
index b260fc6182..201b363566 100644
--- a/sdk/android/src/jni/audio_device/audio_manager.h
+++ b/sdk/android/src/jni/audio_device/audio_manager.h
@@ -30,37 +30,32 @@ namespace android_adm {
 // relies on the AudioManager in android.media. It also populates an
 // AudioParameter structure with native audio parameters detected at
 // construction. This class does not make any audio-related modifications
-// unless Init() is called. Caching audio parameters makes no changes but only
-// reads data from the Java side.
+// unless Init() is called.
 class AudioManager {
  public:
-  // Wraps the Java specific parts of the AudioManager into one helper class.
-  // Stores method IDs for all supported methods at construction and then
-  // allows calls like JavaAudioManager::Close() while hiding the Java/JNI
-  // parts that are associated with this call.
-  class JavaAudioManager {
-   public:
-    explicit JavaAudioManager(const ScopedJavaLocalRef<jobject>& audio_manager);
-    ~JavaAudioManager();
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+  static rtc::scoped_refptr<AudioDeviceModule> CreateAAudioAudioDeviceModule(
+      JNIEnv* env,
+      const JavaParamRef<jobject>& application_context);
+#endif

-    bool Init();
-    void Close();
-    bool IsCommunicationModeEnabled();
-    bool IsDeviceBlacklistedForOpenSLESUsage();
+  static rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+      JNIEnv* env,
+      const JavaParamRef<jobject>& application_context,
+      bool use_opensles_input,
+      bool use_opensles_output);

-   private:
-    JNIEnv* const env_;
-    rtc::ThreadChecker thread_checker_;
-    ScopedJavaGlobalRef<jobject> audio_manager_;
-  };
+  // This function has internal logic checking if OpenSL ES is blacklisted and
+  // whether it is supported.
+  static rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+      JNIEnv* env,
+      const JavaParamRef<jobject>& application_context);

-  AudioManager();
+  AudioManager(JNIEnv* env,
+               AudioDeviceModule::AudioLayer audio_layer,
+               const JavaParamRef<jobject>& application_context);
   ~AudioManager();

-  // Sets the currently active audio layer combination. Must be called before
-  // Init().
-  void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer);
-
   // Creates and realizes the main (global) Open SL engine object and returns
   // a reference to it. The engine object is only created at the first call
   // since OpenSL ES for Android only supports a single engine per application.
@@ -91,14 +86,8 @@ class AudioManager {
   // Can currently only be used in combination with a Java based audio backend
   // for the recording side (i.e. using the android.media.AudioRecord API).
   bool IsAcousticEchoCancelerSupported() const;
-  bool IsAutomaticGainControlSupported() const;
   bool IsNoiseSuppressorSupported() const;

-  // Returns true if the device supports the low-latency audio paths in
-  // combination with OpenSL ES.
-  bool IsLowLatencyPlayoutSupported() const;
-  bool IsLowLatencyRecordSupported() const;
-
   // Returns true if the device supports (and has been configured for) stereo.
   // Call the Java API WebRtcAudioManager.setStereoOutput/Input() with true as
   // parameter to enable stereo. Default is mono in both directions and the
@@ -107,49 +96,23 @@ class AudioManager {
   bool IsStereoPlayoutSupported() const;
   bool IsStereoRecordSupported() const;

-  // Returns true if the device supports pro-audio features in combination with
-  // OpenSL ES.
-  bool IsProAudioSupported() const;
-
-  // Returns true if the device supports AAudio.
-  bool IsAAudioSupported() const;
-
   // Returns the estimated total delay of this device. Unit is in milliseconds.
   // The value is set once at construction and never changes after that.
   // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
   // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
   int GetDelayEstimateInMilliseconds() const;

-  // Called from Java side so we can cache the native audio parameters.
-  // This method will be called by the WebRtcAudioManager constructor, i.e.
-  // on the same thread that this object is created on.
-  void CacheAudioParameters(JNIEnv* env,
-                            const JavaParamRef<jobject>& j_caller,
-                            jint sample_rate,
-                            jint output_channels,
-                            jint input_channels,
-                            jboolean hardware_aec,
-                            jboolean hardware_agc,
-                            jboolean hardware_ns,
-                            jboolean low_latency_output,
-                            jboolean low_latency_input,
-                            jboolean pro_audio,
-                            jboolean a_audio,
-                            jint output_buffer_size,
-                            jint input_buffer_size);
-
  private:
-  // Stores thread ID in the constructor.
-  // We can then use ThreadChecker::CalledOnValidThread() to ensure that
-  // other methods are called from the same thread.
+  // This class is single threaded except that construction might happen on a
+  // different thread.
   rtc::ThreadChecker thread_checker_;

   // Wraps the Java specific parts of the AudioManager.
-  std::unique_ptr<JavaAudioManager> j_audio_manager_;
+  ScopedJavaGlobalRef<jobject> j_audio_manager_;

   // Contains the selected audio layer specified by the AudioLayer enumerator
   // in the AudioDeviceModule class.
-  AudioDeviceModule::AudioLayer audio_layer_;
+  const AudioDeviceModule::AudioLayer audio_layer_;

   // This object is the global entry point of the OpenSL ES API.
   // After creating the engine object, the application can obtain this object's
@@ -161,32 +124,8 @@ class AudioManager {
   // Set to true by Init() and false by Close().
   bool initialized_;

-  // True if device supports hardware (or built-in) AEC.
-  bool hardware_aec_;
-  // True if device supports hardware (or built-in) AGC.
-  bool hardware_agc_;
-  // True if device supports hardware (or built-in) NS.
-  bool hardware_ns_;
-
-  // True if device supports the low-latency OpenSL ES audio path for output.
-  bool low_latency_playout_;
-
-  // True if device supports the low-latency OpenSL ES audio path for input.
-  bool low_latency_record_;
-
-  // True if device supports the low-latency OpenSL ES pro-audio path.
-  bool pro_audio_;
-
-  // True if device supports the low-latency AAudio audio path.
-  bool a_audio_;
-
-  // The delay estimate can take one of two fixed values depending on if the
-  // device supports low-latency output or not.
-  int delay_estimate_in_milliseconds_;
-
-  // Contains native parameters (e.g. sample rate, channel configuration).
-  // Set at construction in OnCacheAudioParameters() which is called from
-  // Java on the same thread as this object is created on.
+  // Contains native parameters (e.g. sample rate, channel configuration). Set
+  // at construction.
   AudioParameters playout_parameters_;
   AudioParameters record_parameters_;
 };
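One behavioural consequence worth noting: the constructor now ends with thread_checker_.DetachFromThread(), so an AudioManager may be constructed on one thread (typically the thread running the JNI factory call) and then used from whichever thread first calls into it afterwards. A sketch of the intended pattern; the thread placement here is illustrative:

    // Sketch: construction and use may happen on different threads.
    webrtc::android_adm::AudioManager audio_manager(
        env, webrtc::AudioDeviceModule::kAndroidJavaAudio,
        webrtc::JavaParamRef<jobject>(application_context));

    // Later, on the audio worker thread only (the first call after the
    // detach re-binds the thread checker to that thread):
    audio_manager.Init();
    int delay_ms = audio_manager.GetDelayEstimateInMilliseconds();
    audio_manager.Close();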
diff --git a/sdk/android/src/jni/pc/peerconnectionfactory.cc b/sdk/android/src/jni/pc/peerconnectionfactory.cc
index cf6459de5c..b4496a5244 100644
--- a/sdk/android/src/jni/pc/peerconnectionfactory.cc
+++ b/sdk/android/src/jni/pc/peerconnectionfactory.cc
@@ -26,8 +26,8 @@
 #include "rtc_base/stringutils.h"
 #include "rtc_base/thread.h"
 #include "sdk/android/generated_peerconnection_jni/jni/PeerConnectionFactory_jni.h"
-#include "sdk/android/native_api/audio_device_module/audio_device_android.h"
 #include "sdk/android/native_api/jni/java_types.h"
+#include "sdk/android/src/jni/audio_device/audio_manager.h"
 #include "sdk/android/src/jni/jni_helpers.h"
 #include "sdk/android/src/jni/pc/androidnetworkmonitor.h"
 #include "sdk/android/src/jni/pc/audio.h"
@@ -102,7 +102,6 @@ void PeerConnectionFactorySignalingThreadReady() {
 static void JNI_PeerConnectionFactory_InitializeAndroidGlobals(
     JNIEnv* jni,
     const JavaParamRef<jclass>&,
-    const JavaParamRef<jobject>& context,
     jboolean video_hw_acceleration) {
   video_hw_acceleration_enabled = video_hw_acceleration;
   if (!factory_static_initialized) {
@@ -173,6 +172,7 @@ static void JNI_PeerConnectionFactory_ShutdownInternalTracer(

 jlong CreatePeerConnectionFactoryForJava(
     JNIEnv* jni,
+    const JavaParamRef<jobject>& jcontext,
     const JavaParamRef<jobject>& joptions,
     const JavaParamRef<jobject>& jencoder_factory,
     const JavaParamRef<jobject>& jdecoder_factory,
@@ -217,7 +217,7 @@ jlong CreatePeerConnectionFactoryForJava(
   rtc::scoped_refptr<AudioDeviceModule> adm =
       field_trial::IsEnabled(kExternalAndroidAudioDeviceFieldTrialName)
-          ? CreateAndroidAudioDeviceModule()
+          ? android_adm::AudioManager::CreateAudioDeviceModule(jni, jcontext)
           : nullptr;
   rtc::scoped_refptr<AudioMixer> audio_mixer = nullptr;
   std::unique_ptr<CallFactoryInterface> call_factory(CreateCallFactory());
@@ -295,6 +295,7 @@ jlong CreatePeerConnectionFactoryForJava(
 static jlong JNI_PeerConnectionFactory_CreatePeerConnectionFactory(
     JNIEnv* jni,
     const JavaParamRef<jclass>&,
+    const JavaParamRef<jobject>& jcontext,
     const JavaParamRef<jobject>& joptions,
     const JavaParamRef<jobject>& jencoder_factory,
     const JavaParamRef<jobject>& jdecoder_factory,
@@ -306,7 +307,7 @@ static jlong JNI_PeerConnectionFactory_CreatePeerConnectionFactory(
       reinterpret_cast<FecControllerFactoryFactoryInterface*>(
          native_fec_controller_factory));
   return CreatePeerConnectionFactoryForJava(
-      jni, joptions, jencoder_factory, jdecoder_factory,
+      jni, jcontext, joptions, jencoder_factory, jdecoder_factory,
       audio_processor ? audio_processor : CreateAudioProcessing(),
       std::move(fec_controller_factory));
 }
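The new code path stays behind the existing field trial checked in CreatePeerConnectionFactoryForJava(). A sketch of enabling it from native code before the factory is created; the exact trial string is an assumption here, since the value of kExternalAndroidAudioDeviceFieldTrialName is not shown in this patch and "WebRTC-ExternalAndroidAudioDevice/Enabled/" merely follows the usual WebRTC-<Name>/Enabled/ convention:

    // Sketch: opting the Java factory path into the new Android ADM.
    #include "system_wrappers/include/field_trial_default.h"

    void EnableExternalAndroidAudioDevice() {
      // The string must stay alive for the lifetime of the process; a string
      // literal satisfies that. The trial name is assumed, see note above.
      webrtc::field_trial::InitFieldTrialsFromString(
          "WebRTC-ExternalAndroidAudioDevice/Enabled/");
    }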