Improving support for Android Audio Effects in WebRTC.

Now also supports the AGC and NS effects and adds the ability
to override the default settings (e.g., to force the WebRTC-based
software effects or a custom default sample rate).

R=magjed@webrtc.org, pbos@webrtc.org, sophiechang@chromium.org
TBR=perkj
BUG=NONE

Review URL: https://codereview.webrtc.org/1344563002 .

Cr-Commit-Position: refs/heads/master@{#10030}
henrika 2015-09-23 14:08:33 +02:00
parent c9bbeb0354
commit c14f5ff60f
25 changed files with 846 additions and 226 deletions
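
The override hooks mentioned in the commit message live in WebRtcAudioUtils (full diff below). A minimal sketch of how an application might opt out of the platform effects; the helper class and call site are hypothetical, only the WebRtcAudioUtils methods come from this change:

import org.webrtc.voiceengine.WebRtcAudioUtils;

public class AudioEffectOverrides {
  // Hypothetical app-side helper; must run before the first recording
  // session so the static flags are read before their results are cached.
  public static void preferWebRtcSoftwareEffects() {
    // Replace the platform (hardware) effects with the WebRTC software
    // versions. Each setter flips a static flag that WebRtcAudioEffects
    // consults when deciding whether a platform effect can be used.
    WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
    WebRtcAudioUtils.setWebRtcBasedAutomaticGainControl(true);
    WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);
  }
}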

View File

@@ -191,6 +191,10 @@ class FakeAudioCaptureModule
int32_t GetLoudspeakerStatus(bool* enabled) const override;
virtual bool BuiltInAECIsAvailable() const { return false; }
virtual int32_t EnableBuiltInAEC(bool enable) { return -1; }
virtual bool BuiltInAGCIsAvailable() const { return false; }
virtual int32_t EnableBuiltInAGC(bool enable) { return -1; }
virtual bool BuiltInNSIsAvailable() const { return false; }
virtual int32_t EnableBuiltInNS(bool enable) { return -1; }
// End of functions inherited from webrtc::AudioDeviceModule.
// The following function is inherited from rtc::MessageHandler.

View File

@@ -162,6 +162,7 @@
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViERenderer.java',
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java',

View File

@@ -735,6 +735,10 @@ class FakeWebRtcVoiceEngine
}
WEBRTC_STUB(EnableBuiltInAEC, (bool enable));
virtual bool BuiltInAECIsAvailable() const { return false; }
WEBRTC_STUB(EnableBuiltInAGC, (bool enable));
virtual bool BuiltInAGCIsAvailable() const { return false; }
WEBRTC_STUB(EnableBuiltInNS, (bool enable));
virtual bool BuiltInNSIsAvailable() const { return false; }
// webrtc::VoENetEqStats
WEBRTC_FUNC(GetNetworkStatistics, (int channel,

View File

@@ -615,6 +615,7 @@ bool WebRtcVoiceEngine::ClearOptionOverrides() {
// AudioOptions defaults are set in InitInternal (for options with corresponding
// MediaEngineInterface flags) and in SetOptions(int) for flagless options.
bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
LOG(LS_INFO) << "ApplyOptions: " << options_in.ToString();
AudioOptions options = options_in; // The options are modified below.
// kEcConference is AEC with high suppression.
webrtc::EcModes ec_mode = webrtc::kEcConference;
@@ -659,8 +660,6 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
}
#endif
LOG(LS_INFO) << "Applying audio options: " << options.ToString();
webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
bool echo_cancellation = false;
@@ -707,8 +706,19 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
}
}
bool auto_gain_control;
bool auto_gain_control = false;
if (options.auto_gain_control.Get(&auto_gain_control)) {
const bool built_in_agc = voe_wrapper_->hw()->BuiltInAGCIsAvailable();
if (built_in_agc) {
if (voe_wrapper_->hw()->EnableBuiltInAGC(auto_gain_control) == 0 &&
auto_gain_control) {
// Disable internal software AGC if built-in AGC is enabled,
// i.e., replace the software AGC with the built-in AGC.
options.auto_gain_control.Set(false);
auto_gain_control = false;
LOG(LS_INFO) << "Disabling AGC since built-in AGC will be used instead";
}
}
if (voep->SetAgcStatus(auto_gain_control, agc_mode) == -1) {
LOG_RTCERR2(SetAgcStatus, auto_gain_control, agc_mode);
return false;
@@ -747,14 +757,25 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
}
}
bool noise_suppression;
bool noise_suppression = false;
if (options.noise_suppression.Get(&noise_suppression)) {
const bool built_in_ns = voe_wrapper_->hw()->BuiltInNSIsAvailable();
if (built_in_ns) {
if (voe_wrapper_->hw()->EnableBuiltInNS(noise_suppression) == 0 &&
noise_suppression) {
// Disable internal software NS if built-in NS is enabled,
// i.e., replace the software NS with the built-in NS.
options.noise_suppression.Set(false);
noise_suppression = false;
LOG(LS_INFO) << "Disabling NS since built-in NS will be used instead";
}
}
if (voep->SetNsStatus(noise_suppression, ns_mode) == -1) {
LOG_RTCERR2(SetNsStatus, noise_suppression, ns_mode);
return false;
} else {
LOG(LS_VERBOSE) << "Noise suppression set to " << noise_suppression
<< " with mode " << ns_mode;
LOG(LS_INFO) << "Noise suppression set to " << noise_suppression
<< " with mode " << ns_mode;
}
}

View File

@@ -460,6 +460,28 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
return input_.EnableBuiltInAEC(enable);
}
// Returns true if the device supports built-in AGC and is not
// blacklisted.
bool BuiltInAGCIsAvailable() const override {
return audio_manager_->IsAutomaticGainControlSupported();
}
int32_t EnableBuiltInAGC(bool enable) override {
RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available";
return input_.EnableBuiltInAGC(enable);
}
// Returns true if the device supports built-in NS and is not
// blacklisted.
bool BuiltInNSIsAvailable() const override {
return audio_manager_->IsNoiseSuppressorSupported();
}
int32_t EnableBuiltInNS(bool enable) override {
RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
return input_.EnableBuiltInNS(enable);
}
private:
rtc::ThreadChecker thread_checker_;

View File

@@ -68,13 +68,15 @@ AudioManager::AudioManager()
audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
initialized_(false),
hardware_aec_(false),
hardware_agc_(false),
hardware_ns_(false),
low_latency_playout_(false),
delay_estimate_in_milliseconds_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
RTC_CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheAudioParameters",
"(IIZZIIJ)V",
"(IIZZZZIIJ)V",
reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioManager",
@@ -144,6 +146,16 @@ bool AudioManager::IsAcousticEchoCancelerSupported() const {
return hardware_aec_;
}
bool AudioManager::IsAutomaticGainControlSupported() const {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return hardware_agc_;
}
bool AudioManager::IsNoiseSuppressorSupported() const {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return hardware_ns_;
}
bool AudioManager::IsLowLatencyPlayoutSupported() const {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
ALOGD("IsLowLatencyPlayoutSupported()");
@@ -162,6 +174,8 @@ void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
jint sample_rate,
jint channels,
jboolean hardware_aec,
jboolean hardware_agc,
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
@@ -169,19 +183,23 @@ void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
webrtc::AudioManager* this_object =
reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
this_object->OnCacheAudioParameters(
env, sample_rate, channels, hardware_aec, low_latency_output,
output_buffer_size, input_buffer_size);
env, sample_rate, channels, hardware_aec, hardware_agc, hardware_ns,
low_latency_output, output_buffer_size, input_buffer_size);
}
void AudioManager::OnCacheAudioParameters(JNIEnv* env,
jint sample_rate,
jint channels,
jboolean hardware_aec,
jboolean hardware_agc,
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size) {
ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
ALOGD("hardware_aec: %d", hardware_aec);
ALOGD("hardware_agc: %d", hardware_agc);
ALOGD("hardware_ns: %d", hardware_ns);
ALOGD("low_latency_output: %d", low_latency_output);
ALOGD("sample_rate: %d", sample_rate);
ALOGD("channels: %d", channels);
@@ -189,6 +207,8 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env,
ALOGD("input_buffer_size: %d", input_buffer_size);
RTC_DCHECK(thread_checker_.CalledOnValidThread());
hardware_aec_ = hardware_aec;
hardware_agc_ = hardware_agc;
hardware_ns_ = hardware_ns;
low_latency_playout_ = low_latency_output;
// TODO(henrika): add support for stereo output.
playout_parameters_.reset(sample_rate, channels,

View File

@@ -74,12 +74,14 @@ class AudioManager {
const AudioParameters& GetPlayoutAudioParameters();
const AudioParameters& GetRecordAudioParameters();
// Returns true if the device supports a built-in Acoustic Echo Canceler.
// Some devices can also be blacklisted for use in combination with an AEC
// and these devices will return false.
// Returns true if the device supports built-in audio effects for AEC, AGC
// and NS. Some devices can also be blacklisted for use in combination with
// platform effects and these devices will return false.
// Can currently only be used in combination with a Java based audio backend
// for the recording side (i.e. using the android.media.AudioRecord API).
bool IsAcousticEchoCancelerSupported() const;
bool IsAutomaticGainControlSupported() const;
bool IsNoiseSuppressorSupported() const;
// Returns true if the device supports the low-latency audio paths in
// combination with OpenSL ES.
@@ -100,6 +102,8 @@ class AudioManager {
jint sample_rate,
jint channels,
jboolean hardware_aec,
jboolean hardware_agc,
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
@@ -108,6 +112,8 @@ class AudioManager {
jint sample_rate,
jint channels,
jboolean hardware_aec,
jboolean hardware_agc,
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size);
@@ -137,6 +143,10 @@ class AudioManager {
// True if device supports hardware (or built-in) AEC.
bool hardware_aec_;
// True if device supports hardware (or built-in) AGC.
bool hardware_agc_;
// True if device supports hardware (or built-in) NS.
bool hardware_ns_;
// True if device supports the low-latency OpenSL ES audio path.
bool low_latency_playout_;

View File

@@ -61,6 +61,16 @@ TEST_F(AudioManagerTest, IsAcousticEchoCancelerSupported) {
audio_manager()->IsAcousticEchoCancelerSupported() ? "Yes" : "No");
}
TEST_F(AudioManagerTest, IsAutomaticGainControlSupported) {
PRINT("%sAutomatic Gain Control support: %s\n", kTag,
audio_manager()->IsAutomaticGainControlSupported() ? "Yes" : "No");
}
TEST_F(AudioManagerTest, IsNoiseSuppressorSupported) {
PRINT("%sNoise Suppressor support: %s\n", kTag,
audio_manager()->IsNoiseSuppressorSupported() ? "Yes" : "No");
}
TEST_F(AudioManagerTest, IsLowLatencyPlayoutSupported) {
PRINT("%sLow latency output support: %s\n", kTag,
audio_manager()->IsLowLatencyPlayoutSupported() ? "Yes" : "No");

View File

@@ -30,11 +30,15 @@ namespace webrtc {
AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_record)
: audio_record_(audio_record.Pass()),
init_recording_(native_reg->GetMethodId("InitRecording", "(II)I")),
start_recording_(native_reg->GetMethodId("StartRecording", "()Z")),
stop_recording_(native_reg->GetMethodId("StopRecording", "()Z")),
init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
enable_built_in_aec_(native_reg->GetMethodId(
"EnableBuiltInAEC", "(Z)Z")) {
"enableBuiltInAEC", "(Z)Z")),
enable_built_in_agc_(native_reg->GetMethodId(
"enableBuiltInAGC", "(Z)Z")),
enable_built_in_ns_(native_reg->GetMethodId(
"enableBuiltInNS", "(Z)Z")) {
}
AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
@@ -59,6 +63,16 @@ bool AudioRecordJni::JavaAudioRecord::EnableBuiltInAEC(bool enable) {
static_cast<jboolean>(enable));
}
bool AudioRecordJni::JavaAudioRecord::EnableBuiltInAGC(bool enable) {
return audio_record_->CallBooleanMethod(enable_built_in_agc_,
static_cast<jboolean>(enable));
}
bool AudioRecordJni::JavaAudioRecord::EnableBuiltInNS(bool enable) {
return audio_record_->CallBooleanMethod(enable_built_in_ns_,
static_cast<jboolean>(enable));
}
// AudioRecordJni implementation.
AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
: j_environment_(JVM::GetInstance()->environment()),
@@ -186,6 +200,18 @@ int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
}
int32_t AudioRecordJni::EnableBuiltInAGC(bool enable) {
ALOGD("EnableBuiltInAGC%s", GetThreadInfo().c_str());
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return j_audio_record_->EnableBuiltInAGC(enable) ? 0 : -1;
}
int32_t AudioRecordJni::EnableBuiltInNS(bool enable) {
ALOGD("EnableBuiltInNS%s", GetThreadInfo().c_str());
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
}
void JNICALL AudioRecordJni::CacheDirectBufferAddress(
JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord) {
webrtc::AudioRecordJni* this_object =

View File

@@ -53,6 +53,8 @@ class AudioRecordJni {
bool StartRecording();
bool StopRecording();
bool EnableBuiltInAEC(bool enable);
bool EnableBuiltInAGC(bool enable);
bool EnableBuiltInNS(bool enable);
private:
rtc::scoped_ptr<GlobalRef> audio_record_;
@@ -60,6 +62,8 @@
jmethodID start_recording_;
jmethodID stop_recording_;
jmethodID enable_built_in_aec_;
jmethodID enable_built_in_agc_;
jmethodID enable_built_in_ns_;
};
explicit AudioRecordJni(AudioManager* audio_manager);
@@ -78,6 +82,8 @@
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
int32_t EnableBuiltInAEC(bool enable);
int32_t EnableBuiltInAGC(bool enable);
int32_t EnableBuiltInNS(bool enable);
private:
// Called from Java side so we can cache the address of the Java-managed

View File

@@ -30,13 +30,13 @@ namespace webrtc {
AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_track)
: audio_track_(audio_track.Pass()),
init_playout_(native_reg->GetMethodId("InitPlayout", "(II)V")),
start_playout_(native_reg->GetMethodId("StartPlayout", "()Z")),
stop_playout_(native_reg->GetMethodId("StopPlayout", "()Z")),
set_stream_volume_(native_reg->GetMethodId("SetStreamVolume", "(I)Z")),
init_playout_(native_reg->GetMethodId("initPlayout", "(II)V")),
start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
stop_playout_(native_reg->GetMethodId("stopPlayout", "()Z")),
set_stream_volume_(native_reg->GetMethodId("setStreamVolume", "(I)Z")),
get_stream_max_volume_(native_reg->GetMethodId(
"GetStreamMaxVolume", "()I")),
get_stream_volume_(native_reg->GetMethodId("GetStreamVolume", "()I")) {
"getStreamMaxVolume", "()I")),
get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")) {
}
AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}

View File

@@ -0,0 +1,377 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.voiceengine;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AudioEffect;
import android.media.audiofx.AudioEffect.Descriptor;
import android.media.audiofx.AutomaticGainControl;
import android.media.audiofx.NoiseSuppressor;
import android.os.Build;
import org.webrtc.Logging;
import java.util.List;
import java.util.UUID;
// This class wraps control of three different platform effects. Supported
// effects are: AcousticEchoCanceler (AEC), AutomaticGainControl (AGC) and
// NoiseSuppressor (NS). Calling enable() will activate all effects that are
// supported by the device if the corresponding |shouldEnableXXX| member is set.
class WebRtcAudioEffects {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioEffects";
// UUIDs for Software Audio Effects that we want to avoid using.
// The implementor field will be set to "The Android Open Source Project".
private static final UUID AOSP_ACOUSTIC_ECHO_CANCELER =
UUID.fromString("bb392ec0-8d4d-11e0-a896-0002a5d5c51b");
private static final UUID AOSP_AUTOMATIC_GAIN_CONTROL =
UUID.fromString("aa8130e0-66fc-11e0-bad0-0002a5d5c51b");
private static final UUID AOSP_NOISE_SUPPRESSOR =
UUID.fromString("c06c8400-8e06-11e0-9cb6-0002a5d5c51b");
// Static Boolean objects used to avoid expensive queries more than once.
// The first result is cached in these members and then reused if needed.
// Each member is null until it has been evaluated/set for the first time.
private static Boolean canUseAcousticEchoCanceler = null;
private static Boolean canUseAutomaticGainControl = null;
private static Boolean canUseNoiseSuppressor = null;
// Contains the audio effect objects. Created in enable() and destroyed
// in release().
private AcousticEchoCanceler aec = null;
private AutomaticGainControl agc = null;
private NoiseSuppressor ns = null;
// Affects the final state given to the setEnabled() method on each effect.
// The default state is set to "disabled" but each effect can also be enabled
// by calling setAEC(), setAGC() and setNS().
// To enable an effect, both the shouldEnableXXX member and the static
// canUseXXX() must be true.
private boolean shouldEnableAec = false;
private boolean shouldEnableAgc = false;
private boolean shouldEnableNs = false;
// Checks if the device implements Acoustic Echo Cancellation (AEC).
// Returns true if the device implements AEC, false otherwise.
public static boolean isAcousticEchoCancelerSupported() {
return WebRtcAudioUtils.runningOnJellyBeanOrHigher()
&& AcousticEchoCanceler.isAvailable();
}
// Checks if the device implements Automatic Gain Control (AGC).
// Returns true if the device implements AGC, false otherwise.
public static boolean isAutomaticGainControlSupported() {
return WebRtcAudioUtils.runningOnJellyBeanOrHigher()
&& AutomaticGainControl.isAvailable();
}
// Checks if the device implements Noise Suppression (NS).
// Returns true if the device implements NS, false otherwise.
public static boolean isNoiseSuppressorSupported() {
return WebRtcAudioUtils.runningOnJellyBeanOrHigher()
&& NoiseSuppressor.isAvailable();
}
// Returns true if the device is blacklisted for HW AEC usage.
public static boolean isAcousticEchoCancelerBlacklisted() {
List<String> blackListedModels =
WebRtcAudioUtils.getBlackListedModelsForAecUsage();
boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
if (isBlacklisted) {
Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
}
return isBlacklisted;
}
// Returns true if the device is blacklisted for HW AGC usage.
public static boolean isAutomaticGainControlBlacklisted() {
List<String> blackListedModels =
WebRtcAudioUtils.getBlackListedModelsForAgcUsage();
boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
if (isBlacklisted) {
Logging.w(TAG, Build.MODEL + " is blacklisted for HW AGC usage!");
}
return isBlacklisted;
}
// Returns true if the device is blacklisted for HW NS usage.
public static boolean isNoiseSuppressorBlacklisted() {
List<String> blackListedModels =
WebRtcAudioUtils.getBlackListedModelsForNsUsage();
boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
if (isBlacklisted) {
Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
}
return isBlacklisted;
}
// Returns true if the platform AEC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
private static boolean isAcousticEchoCancelerExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC) &&
d.uuid.equals(AOSP_ACOUSTIC_ECHO_CANCELER)) {
return true;
}
}
return false;
}
// Returns true if the platform AGC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
private static boolean isAutomaticGainControlExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AGC) &&
d.uuid.equals(AOSP_AUTOMATIC_GAIN_CONTROL)) {
return true;
}
}
return false;
}
// Returns true if the platform NS should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
private static boolean isNoiseSuppressorExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) &&
d.uuid.equals(AOSP_NOISE_SUPPRESSOR)) {
return true;
}
}
return false;
}
// Returns true if all conditions for supporting the HW AEC are fulfilled.
// It will not be possible to enable the HW AEC if this method returns false.
public static boolean canUseAcousticEchoCanceler() {
if (canUseAcousticEchoCanceler == null) {
canUseAcousticEchoCanceler = new Boolean(
isAcousticEchoCancelerSupported()
&& !WebRtcAudioUtils.useWebRtcBasedAcousticEchoCanceler()
&& !isAcousticEchoCancelerBlacklisted()
&& !isAcousticEchoCancelerExcludedByUUID());
Logging.d(TAG, "canUseAcousticEchoCanceler: "
+ canUseAcousticEchoCanceler);
}
return canUseAcousticEchoCanceler;
}
// Returns true if all conditions for supporting the HW AGC are fulfilled.
// It will not be possible to enable the HW AGC if this method returns false.
public static boolean canUseAutomaticGainControl() {
if (canUseAutomaticGainControl == null) {
canUseAutomaticGainControl = new Boolean(
isAutomaticGainControlSupported()
&& !WebRtcAudioUtils.useWebRtcBasedAutomaticGainControl()
&& !isAutomaticGainControlBlacklisted()
&& !isAutomaticGainControlExcludedByUUID());
Logging.d(TAG, "canUseAutomaticGainControl: "
+ canUseAutomaticGainControl);
}
return canUseAutomaticGainControl;
}
// Returns true if all conditions for supporting the HW NS are fulfilled.
// It will not be possible to enable the HW NS if this method returns false.
public static boolean canUseNoiseSuppressor() {
if (canUseNoiseSuppressor == null) {
canUseNoiseSuppressor = new Boolean(
isNoiseSuppressorSupported()
&& !WebRtcAudioUtils.useWebRtcBasedNoiseSuppressor()
&& !isNoiseSuppressorBlacklisted()
&& !isNoiseSuppressorExcludedByUUID());
Logging.d(TAG, "canUseNoiseSuppressor: " + canUseNoiseSuppressor);
}
return canUseNoiseSuppressor;
}
static WebRtcAudioEffects create() {
// Return null if VoIP effects (AEC, AGC and NS) are not supported.
if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
Logging.w(TAG, "API level 16 or higher is required!");
return null;
}
return new WebRtcAudioEffects();
}
private WebRtcAudioEffects() {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
for (Descriptor d : AudioEffect.queryEffects()) {
if (effectTypeIsVoIP(d.type)) {
// Only log information for VoIP effects (AEC, AGC and NS).
Logging.d(TAG, "name: " + d.name + ", " +
"mode: " + d.connectMode + ", " +
"implementor: " + d.implementor + ", " +
"UUID: " + d.uuid);
}
}
}
// Call this method to enable or disable the platform AEC. It modifies
// |shouldEnableAec| which is used in enable() where the actual state
// of the AEC effect is modified. Returns true if HW AEC is supported and
// false otherwise.
public boolean setAEC(boolean enable) {
Logging.d(TAG, "setAEC(" + enable + ")");
if (!canUseAcousticEchoCanceler()) {
Logging.w(TAG, "Platform AEC is not supported");
shouldEnableAec = false;
return false;
}
if (aec != null && (enable != shouldEnableAec)) {
Logging.e(TAG, "Platform AEC state can't be modified while recording");
return false;
}
shouldEnableAec = enable;
return true;
}
// Call this method to enable or disable the platform AGC. It modifies
// |shouldEnableAgc| which is used in enable() where the actual state
// of the AGC effect is modified. Returns true if HW AGC is supported and
// false otherwise.
public boolean setAGC(boolean enable) {
Logging.d(TAG, "setAGC(" + enable + ")");
if (!canUseAutomaticGainControl()) {
Logging.w(TAG, "Platform AGC is not supported");
shouldEnableAgc = false;
return false;
}
if (agc != null && (enable != shouldEnableAgc)) {
Logging.e(TAG, "Platform AGC state can't be modified while recording");
return false;
}
shouldEnableAgc = enable;
return true;
}
// Call this method to enable or disable the platform NS. It modifies
// |shouldEnableNs| which is used in enable() where the actual state
// of the NS effect is modified. Returns true if HW NS is supported and
// false otherwise.
public boolean setNS(boolean enable) {
Logging.d(TAG, "setNS(" + enable + ")");
if (!canUseNoiseSuppressor()) {
Logging.w(TAG, "Platform NS is not supported");
shouldEnableNs = false;
return false;
}
if (ns != null && (enable != shouldEnableNs)) {
Logging.e(TAG, "Platform NS state can't be modified while recording");
return false;
}
shouldEnableNs = enable;
return true;
}
public void enable(int audioSession) {
Logging.d(TAG, "enable(audioSession=" + audioSession + ")");
assertTrue(aec == null);
assertTrue(agc == null);
assertTrue(ns == null);
if (isAcousticEchoCancelerSupported()) {
// Create an AcousticEchoCanceler and attach it to the AudioRecord on
// the specified audio session.
aec = AcousticEchoCanceler.create(audioSession);
if (aec != null) {
boolean enabled = aec.getEnabled();
boolean enable = shouldEnableAec && canUseAcousticEchoCanceler();
if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
}
Logging.d(TAG, "AcousticEchoCanceler: was " +
(enabled ? "enabled" : "disabled") +
", enable: " + enable + ", is now: " +
(aec.getEnabled() ? "enabled" : "disabled"));
} else {
Logging.e(TAG, "Failed to create the AcousticEchoCanceler instance");
}
}
if (isAutomaticGainControlSupported()) {
// Create an AutomaticGainControl and attach it to the AudioRecord on
// the specified audio session.
agc = AutomaticGainControl.create(audioSession);
if (agc != null) {
boolean enabled = agc.getEnabled();
boolean enable = shouldEnableAgc && canUseAutomaticGainControl();
if (agc.setEnabled(enable) != AudioEffect.SUCCESS) {
Logging.e(TAG, "Failed to set the AutomaticGainControl state");
}
Logging.d(TAG, "AutomaticGainControl: was " +
(enabled ? "enabled" : "disabled") +
", enable: " + enable + ", is now: " +
(agc.getEnabled() ? "enabled" : "disabled"));
} else {
Logging.e(TAG, "Failed to create the AutomaticGainControl instance");
}
}
if (isNoiseSuppressorSupported()) {
// Create a NoiseSuppressor and attach it to the AudioRecord on the
// specified audio session.
ns = NoiseSuppressor.create(audioSession);
if (ns != null) {
boolean enabled = ns.getEnabled();
boolean enable = shouldEnableNs && canUseNoiseSuppressor();
if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
Logging.e(TAG, "Failed to set the NoiseSuppressor state");
}
Logging.d(TAG, "NoiseSuppressor: was " +
(enabled ? "enabled" : "disabled") +
", enable: " + enable + ", is now: " +
(ns.getEnabled() ? "enabled" : "disabled"));
} else {
Logging.e(TAG, "Failed to create the NoiseSuppressor instance");
}
}
}
// Releases all native audio effect resources. It is a good practice to
// release the effect engine when not in use as control can be returned
// to other applications or the native resources released.
public void release() {
Logging.d(TAG, "release");
if (aec != null) {
aec.release();
aec = null;
}
if (agc != null) {
agc.release();
agc = null;
}
if (ns != null) {
ns.release();
ns = null;
}
}
// Returns true for effect types in |type| that are of "VoIP" types:
// Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or
// Noise Suppressor (NS).
private boolean effectTypeIsVoIP(UUID type) {
return AudioEffect.EFFECT_TYPE_AEC.equals(type)
|| AudioEffect.EFFECT_TYPE_AGC.equals(type)
|| AudioEffect.EFFECT_TYPE_NS.equals(type);
}
// Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
}
}
}
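
For context, WebRtcAudioRecord (later in this commit) drives the class above in three phases: configure, attach, release. A condensed sketch of that lifecycle; audioSessionId stands in for AudioRecord.getAudioSessionId(), and because the class is package-private this only compiles inside org.webrtc.voiceengine:

// Sketch of the WebRtcAudioEffects lifecycle as used by WebRtcAudioRecord.
WebRtcAudioEffects effects = WebRtcAudioEffects.create();  // Null below API 16.
if (effects != null) {
  effects.setAEC(true);  // Request platform AEC; false if it can't be used.
  effects.setNS(true);   // Request platform NS.
  // Once the AudioRecord exists, attach the effects to its audio session.
  effects.enable(audioSessionId);
}
// ... recording ...
if (effects != null) {
  effects.release();  // Return the native effect resources.
}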

View File

@@ -42,10 +42,6 @@ class WebRtcAudioManager {
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
// Use 16kHz as the default sample rate. A higher sample rate might prevent
// us from supporting communication mode on some older (e.g. ICS) devices.
private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
private static final int DEFAULT_FRAME_PER_BUFFER = 256;
// TODO(henrika): add stereo support for playout.
@@ -68,6 +64,8 @@ class WebRtcAudioManager {
private int nativeChannels;
private boolean hardwareAEC;
private boolean hardwareAGC;
private boolean hardwareNS;
private boolean lowLatencyOutput;
private int sampleRate;
private int channels;
@@ -75,7 +73,7 @@
private int inputBufferSize;
WebRtcAudioManager(Context context, long nativeAudioManager) {
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioManager = nativeAudioManager;
audioManager = (AudioManager) context.getSystemService(
@@ -85,22 +83,23 @@
}
storeAudioParameters();
nativeCacheAudioParameters(
sampleRate, channels, hardwareAEC, lowLatencyOutput, outputBufferSize,
inputBufferSize, nativeAudioManager);
sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS,
lowLatencyOutput, outputBufferSize, inputBufferSize,
nativeAudioManager);
}
private boolean init() {
Logd("init" + WebRtcAudioUtils.getThreadInfo());
Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
if (initialized) {
return true;
}
Logd("audio mode is: " + AUDIO_MODES[audioManager.getMode()]);
Logging.d(TAG, "audio mode is: " + AUDIO_MODES[audioManager.getMode()]);
initialized = true;
return true;
}
private void dispose() {
Logd("dispose" + WebRtcAudioUtils.getThreadInfo());
Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
if (!initialized) {
return;
}
@@ -116,7 +115,7 @@
if (blacklisted) {
// TODO(henrika): enable again for all devices once issue in b/21485703
// has been resolved.
Loge(Build.MODEL + " is blacklisted for OpenSL ES usage!");
Logging.e(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
}
return blacklisted;
}
@@ -127,6 +126,8 @@
channels = CHANNELS;
sampleRate = getNativeOutputSampleRate();
hardwareAEC = isAcousticEchoCancelerSupported();
hardwareAGC = isAutomaticGainControlSupported();
hardwareNS = isNoiseSuppressorSupported();
lowLatencyOutput = isLowLatencyOutputSupported();
outputBufferSize = lowLatencyOutput ?
getLowLatencyOutputFramesPerBuffer() :
@@ -163,16 +164,30 @@
// Override this if we're running on an old emulator image which only
// supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
if (WebRtcAudioUtils.runningOnEmulator()) {
Logd("Running on old emulator, overriding sampling rate to 8 kHz.");
Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
return 8000;
}
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
return DEFAULT_SAMPLE_RATE_HZ;
// Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
// If so, use that value and return here.
if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
Logging.d(TAG, "Default sample rate is overriden to " +
WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
return WebRtcAudioUtils.getDefaultSampleRateHz();
}
String sampleRateString = audioManager.getProperty(
AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
return (sampleRateString == null) ?
DEFAULT_SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
// No overrides available. Deliver best possible estimate based on default
// Android AudioManager APIs.
final int sampleRateHz;
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
} else {
String sampleRateString = audioManager.getProperty(
AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
sampleRateHz = (sampleRateString == null)
? WebRtcAudioUtils.getDefaultSampleRateHz()
: Integer.parseInt(sampleRateString);
}
Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
return sampleRateHz;
}
// Returns the native output buffer size for low-latency output streams.
@@ -187,14 +202,20 @@
DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
}
// Returns true if the device supports Acoustic Echo Canceler (AEC).
// Also takes blacklisting into account.
// Returns true if the device supports an audio effect (AEC, AGC or NS).
// Four conditions must be fulfilled for these methods to return true:
// 1) the platform must support the built-in (HW) effect,
// 2) explicit use (override) of a WebRTC based version must not be set,
// 3) the device must not be blacklisted for use of the effect, and
// 4) the UUID of the effect must be approved (some UUIDs can be excluded).
private static boolean isAcousticEchoCancelerSupported() {
if (WebRtcAudioUtils.deviceIsBlacklistedForHwAecUsage()) {
Logd(Build.MODEL + " is blacklisted for HW AEC usage!");
return false;
}
return WebRtcAudioUtils.isAcousticEchoCancelerSupported();
return WebRtcAudioEffects.canUseAcousticEchoCanceler();
}
private static boolean isAutomaticGainControlSupported() {
return WebRtcAudioEffects.canUseAutomaticGainControl();
}
private static boolean isNoiseSuppressorSupported() {
return WebRtcAudioEffects.canUseNoiseSuppressor();
}
// Returns the minimum output buffer size for Java based audio (AudioTrack).
@@ -245,16 +266,8 @@
}
}
private static void Logd(String msg) {
Logging.d(TAG, msg);
}
private static void Loge(String msg) {
Logging.e(TAG, msg);
}
private native void nativeCacheAudioParameters(
int sampleRate, int channels, boolean hardwareAEC, boolean lowLatencyOutput,
int outputBufferSize, int inputBufferSize,
long nativeAudioManager);
int sampleRate, int channels, boolean hardwareAEC, boolean hardwareAGC,
boolean hardwareNS, boolean lowLatencyOutput, int outputBufferSize,
int inputBufferSize, long nativeAudioManager);
}
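
The getNativeOutputSampleRate() logic above resolves the rate in order: emulator override, explicit application override, AudioManager property, WebRtcAudioUtils default. A brief, hypothetical example of the application override path (44100 is an arbitrary illustration):

// Work around a device whose AudioManager reports a bogus sample rate.
// Must be called before WebRtcAudioManager caches the audio parameters.
WebRtcAudioUtils.setDefaultSampleRateHz(44100);
// isDefaultSampleRateOverridden() now returns true, so
// getNativeOutputSampleRate() logs and returns 44100 from here on.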

View File

@@ -16,9 +16,6 @@ import java.util.concurrent.TimeUnit;
import android.content.Context;
import android.media.AudioFormat;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AudioEffect;
import android.media.audiofx.AudioEffect.Descriptor;
import android.media.AudioRecord;
import android.media.MediaRecorder.AudioSource;
import android.os.Build;
@@ -45,14 +42,13 @@ class WebRtcAudioRecord {
private final long nativeAudioRecord;
private final Context context;
private WebRtcAudioEffects effects = null;
private ByteBuffer byteBuffer;
private AudioRecord audioRecord = null;
private AudioRecordThread audioThread = null;
private AcousticEchoCanceler aec = null;
private boolean useBuiltInAEC = false;
/**
* Audio thread which keeps calling ByteBuffer.read() waiting for audio
* to be recorded. Feeds recorded data to the native counterpart as a
@@ -69,7 +65,7 @@
@Override
public void run() {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
Logd("AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
Logging.w(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
assertTrue(audioRecord.getRecordingState()
== AudioRecord.RECORDSTATE_RECORDING);
@@ -79,7 +75,7 @@
if (bytesRead == byteBuffer.capacity()) {
nativeDataIsRecorded(bytesRead, nativeAudioRecord);
} else {
Loge("AudioRecord.read failed: " + bytesRead);
Logging.e(TAG,"AudioRecord.read failed: " + bytesRead);
if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
keepAlive = false;
}
@@ -89,14 +85,14 @@
long durationInMs =
TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
lastTime = nowTime;
Logd("bytesRead[" + durationInMs + "] " + bytesRead);
Logging.w(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
}
}
try {
audioRecord.stop();
} catch (IllegalStateException e) {
Loge("AudioRecord.stop failed: " + e.getMessage());
Logging.e(TAG,"AudioRecord.stop failed: " + e.getMessage());
}
}
@@ -113,47 +109,58 @@
}
WebRtcAudioRecord(Context context, long nativeAudioRecord) {
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
Logging.w(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
this.nativeAudioRecord = nativeAudioRecord;
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
effects = WebRtcAudioEffects.create();
}
private boolean EnableBuiltInAEC(boolean enable) {
Logd("EnableBuiltInAEC(" + enable + ')');
assertTrue(WebRtcAudioUtils.isAcousticEchoCancelerApproved());
// Store the AEC state.
useBuiltInAEC = enable;
// Set AEC state if AEC has already been created.
if (aec != null) {
int ret = aec.setEnabled(enable);
if (ret != AudioEffect.SUCCESS) {
Loge("AcousticEchoCanceler.setEnabled failed");
return false;
}
Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
private boolean enableBuiltInAEC(boolean enable) {
Logging.w(TAG, "enableBuiltInAEC(" + enable + ')');
if (effects == null) {
Logging.e(TAG,"Built-in AEC is not supported on this platform");
return false;
}
return true;
return effects.setAEC(enable);
}
private int InitRecording(int sampleRate, int channels) {
Logd("InitRecording(sampleRate=" + sampleRate + ", channels=" +
private boolean enableBuiltInAGC(boolean enable) {
Logging.w(TAG, "enableBuiltInAGC(" + enable + ')');
if (effects == null) {
Logging.e(TAG,"Built-in AGC is not supported on this platform");
return false;
}
return effects.setAGC(enable);
}
private boolean enableBuiltInNS(boolean enable) {
Logging.w(TAG, "enableBuiltInNS(" + enable + ')');
if (effects == null) {
Logging.e(TAG,"Built-in NS is not supported on this platform");
return false;
}
return effects.setNS(enable);
}
private int initRecording(int sampleRate, int channels) {
Logging.w(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" +
channels + ")");
if (!WebRtcAudioUtils.hasPermission(
context, android.Manifest.permission.RECORD_AUDIO)) {
Loge("RECORD_AUDIO permission is missing");
Logging.e(TAG,"RECORD_AUDIO permission is missing");
return -1;
}
if (audioRecord != null) {
Loge("InitRecording() called twice without StopRecording()");
Logging.e(TAG,"InitRecording() called twice without StopRecording()");
return -1;
}
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
Logd("byteBuffer.capacity: " + byteBuffer.capacity());
Logging.w(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
// Rather than passing the ByteBuffer with every callback (requiring
// the potentially expensive GetDirectBufferAddress) we simply have the
// native class cache the address to the memory once.
@@ -167,11 +174,11 @@
sampleRate,
AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT);
Logd("AudioRecord.getMinBufferSize: " + minBufferSize);
Logging.w(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize);
Logd("bufferSizeInBytes: " + bufferSizeInBytes);
Logging.w(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
try {
audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
sampleRate,
@@ -180,63 +187,37 @@
bufferSizeInBytes);
} catch (IllegalArgumentException e) {
Loge(e.getMessage());
Logging.e(TAG,e.getMessage());
return -1;
}
if (audioRecord == null ||
audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
Loge("Failed to create a new AudioRecord instance");
Logging.e(TAG,"Failed to create a new AudioRecord instance");
return -1;
}
Logd("AudioRecord " +
"session ID: " + audioRecord.getAudioSessionId() + ", " +
"audio format: " + audioRecord.getAudioFormat() + ", " +
"channels: " + audioRecord.getChannelCount() + ", " +
"sample rate: " + audioRecord.getSampleRate());
Logd("AcousticEchoCanceler.isAvailable: " + builtInAECIsAvailable());
if (!builtInAECIsAvailable()) {
return framesPerBuffer;
Logging.w(TAG, "AudioRecord "
+ "session ID: " + audioRecord.getAudioSessionId() + ", "
+ "audio format: " + audioRecord.getAudioFormat() + ", "
+ "channels: " + audioRecord.getChannelCount() + ", "
+ "sample rate: " + audioRecord.getSampleRate());
if (effects != null) {
effects.enable(audioRecord.getAudioSessionId());
}
if (WebRtcAudioUtils.deviceIsBlacklistedForHwAecUsage()) {
// Just in case, ensure that no attempt has been done to enable the
// HW AEC on a blacklisted device.
assertTrue(!useBuiltInAEC);
}
// We create an AEC also for blacklisted devices since it is possible that
// HW AEC is enabled by default. Hence, the AEC object is needed to be
// able to check the current state and to disable the AEC if enabled.
aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
if (aec == null) {
Loge("AcousticEchoCanceler.create failed");
return -1;
}
int ret = aec.setEnabled(useBuiltInAEC);
if (ret != AudioEffect.SUCCESS) {
Loge("AcousticEchoCanceler.setEnabled failed");
return -1;
}
Descriptor descriptor = aec.getDescriptor();
Logd("AcousticEchoCanceler " +
"name: " + descriptor.name + ", " +
"implementor: " + descriptor.implementor + ", " +
"uuid: " + descriptor.uuid);
Logd("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
return framesPerBuffer;
}
private boolean StartRecording() {
Logd("StartRecording");
private boolean startRecording() {
Logging.w(TAG, "startRecording");
assertTrue(audioRecord != null);
assertTrue(audioThread == null);
try {
audioRecord.startRecording();
} catch (IllegalStateException e) {
Loge("AudioRecord.startRecording failed: " + e.getMessage());
Logging.e(TAG,"AudioRecord.startRecording failed: " + e.getMessage());
return false;
}
if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
Loge("AudioRecord.startRecording failed");
Logging.e(TAG,"AudioRecord.startRecording failed");
return false;
}
audioThread = new AudioRecordThread("AudioRecordJavaThread");
@@ -244,26 +225,19 @@
return true;
}
private boolean StopRecording() {
Logd("StopRecording");
private boolean stopRecording() {
Logging.w(TAG, "stopRecording");
assertTrue(audioThread != null);
audioThread.joinThread();
audioThread = null;
if (aec != null) {
aec.release();
aec = null;
if (effects != null) {
effects.release();
}
audioRecord.release();
audioRecord = null;
return true;
}
// Returns true if built-in AEC is available. Does not take blacklisting
// into account.
private static boolean builtInAECIsAvailable() {
return WebRtcAudioUtils.isAcousticEchoCancelerSupported();
}
// Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
@@ -271,14 +245,6 @@
}
}
private static void Logd(String msg) {
Logging.d(TAG, msg);
}
private static void Loge(String msg) {
Logging.e(TAG, msg);
}
private native void nativeCacheDirectBufferAddress(
ByteBuffer byteBuffer, long nativeAudioRecord);

View File

@@ -146,8 +146,8 @@ class WebRtcAudioTrack {
}
}
private void InitPlayout(int sampleRate, int channels) {
Logd("InitPlayout(sampleRate=" + sampleRate + ", channels=" +
private void initPlayout(int sampleRate, int channels) {
Logd("initPlayout(sampleRate=" + sampleRate + ", channels=" +
channels + ")");
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
byteBuffer = byteBuffer.allocateDirect(
@@ -192,8 +192,8 @@
assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
}
private boolean StartPlayout() {
Logd("StartPlayout");
private boolean startPlayout() {
Logd("startPlayout");
assertTrue(audioTrack != null);
assertTrue(audioThread == null);
audioThread = new AudioTrackThread("AudioTrackJavaThread");
@@ -201,8 +201,8 @@
return true;
}
private boolean StopPlayout() {
Logd("StopPlayout");
private boolean stopPlayout() {
Logd("stopPlayout");
assertTrue(audioThread != null);
audioThread.joinThread();
audioThread = null;
@@ -214,15 +214,15 @@
}
/** Get max possible volume index for a phone call audio stream. */
private int GetStreamMaxVolume() {
Logd("GetStreamMaxVolume");
private int getStreamMaxVolume() {
Logd("getStreamMaxVolume");
assertTrue(audioManager != null);
return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}
/** Set current volume level for a phone call audio stream. */
private boolean SetStreamVolume(int volume) {
Logd("SetStreamVolume(" + volume + ")");
private boolean setStreamVolume(int volume) {
Logd("setStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
if (audioManager.isVolumeFixed()) {
@@ -235,8 +235,8 @@
}
/** Get current volume level for a phone call audio stream. */
private int GetStreamVolume() {
Logd("GetStreamVolume");
private int getStreamVolume() {
Logd("getStreamVolume");
assertTrue(audioManager != null);
return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
}

View File

@@ -22,29 +22,111 @@ import android.os.Process;
import org.webrtc.Logging;
import java.lang.Thread;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public final class WebRtcAudioUtils {
// List of devices where it has been verified that the built-in AEC performs
// badly and where it makes sense to avoid using it and rely on the
// native WebRTC AEC instead. The device name is given by Build.MODEL.
private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
"Nexus 5", // Nexus 5
"D6503", // Sony Xperia Z2 D6503
};
private static final String TAG = "WebRtcAudioUtils";
// List of devices where we have seen issues (e.g. bad audio quality) using
// the low latency ouput mode in combination with OpenSL ES.
// the low latency output mode in combination with OpenSL ES.
// The device name is given by Build.MODEL.
private static final String[] BLACKLISTED_OPEN_SL_ES_MODELS = new String[] {
"Nexus 6", // Nexus 6
};
// Use 44.1kHz as the default sampling rate.
private static final int SAMPLE_RATE_HZ = 44100;
// List of devices where it has been verified that the built-in effect
// performs badly and where it makes sense to avoid using it and rely on
// the native WebRTC version instead. The device name is given by Build.MODEL.
private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
"Nexus 5",
"D6503", // Sony Xperia Z2 D6503
};
private static final String[] BLACKLISTED_AGC_MODELS = new String[] {
"Nexus 10",
"Nexus 9",
};
private static final String[] BLACKLISTED_NS_MODELS = new String[] {
"Nexus 10",
"Nexus 9",
"Nexus 6",
"Nexus 5",
};
// Use 16kHz as the default sample rate. A higher sample rate might prevent
// us from supporting communication mode on some older (e.g. ICS) devices.
private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
private static int defaultSampleRateHz = DEFAULT_SAMPLE_RATE_HZ;
// Set to true if setDefaultSampleRateHz() has been called.
private static boolean isDefaultSampleRateOverridden = false;
// By default, utilize hardware based audio effects when available.
private static boolean useWebRtcBasedAcousticEchoCanceler = false;
private static boolean useWebRtcBasedAutomaticGainControl = false;
private static boolean useWebRtcBasedNoiseSuppressor = false;
// Call these methods if any hardware based effect shall be replaced by a
// software based version provided by the WebRTC stack instead.
public static synchronized void setWebRtcBasedAcousticEchoCanceler(
boolean enable) {
useWebRtcBasedAcousticEchoCanceler = enable;
}
public static synchronized void setWebRtcBasedAutomaticGainControl(
boolean enable) {
useWebRtcBasedAutomaticGainControl = enable;
}
public static synchronized void setWebRtcBasedNoiseSuppressor(
boolean enable) {
useWebRtcBasedNoiseSuppressor = enable;
}
public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
if (useWebRtcBasedAcousticEchoCanceler) {
Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
}
return useWebRtcBasedAcousticEchoCanceler;
}
public static synchronized boolean useWebRtcBasedAutomaticGainControl() {
if (useWebRtcBasedAutomaticGainControl) {
Logging.w(TAG, "Overriding default behavior; now using WebRTC AGC!");
}
return useWebRtcBasedAutomaticGainControl;
}
public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
if (useWebRtcBasedNoiseSuppressor) {
Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
}
return useWebRtcBasedNoiseSuppressor;
}
// Call this method if the default handling of querying the native sample
// rate shall be overridden. Can be useful on some devices where the
// available Android APIs are known to return invalid results.
public static synchronized void setDefaultSampleRateHz(int sampleRateHz) {
isDefaultSampleRateOverridden = true;
defaultSampleRateHz = sampleRateHz;
}
public static synchronized boolean isDefaultSampleRateOverridden() {
return isDefaultSampleRateOverridden;
}
public static synchronized int getDefaultSampleRateHz() {
return defaultSampleRateHz;
}
public static List<String> getBlackListedModelsForAecUsage() {
return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AEC_MODELS);
}
public static List<String> getBlackListedModelsForAgcUsage() {
return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AGC_MODELS);
}
public static List<String> getBlackListedModelsForNsUsage() {
return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_NS_MODELS);
}
public static boolean runningOnGingerBreadOrHigher() {
// November 2010: Android 2.3, API Level 9.
@@ -78,12 +160,6 @@ public final class WebRtcAudioUtils {
Build.BRAND.startsWith("generic_");
}
// Returns true if the device is blacklisted for HW AEC usage.
public static boolean deviceIsBlacklistedForHwAecUsage() {
List<String> blackListedModels = Arrays.asList(BLACKLISTED_AEC_MODELS);
return blackListedModels.contains(Build.MODEL);
}
// Returns true if the device is blacklisted for OpenSL ES usage.
public static boolean deviceIsBlacklistedForOpenSLESUsage() {
List<String> blackListedModels =
@@ -91,23 +167,6 @@ public final class WebRtcAudioUtils {
return blackListedModels.contains(Build.MODEL);
}
// Returns true if the device supports Acoustic Echo Canceler (AEC).
public static boolean isAcousticEchoCancelerSupported() {
// AcousticEchoCanceler was added in API level 16 (Jelly Bean).
if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
return false;
}
// Check if the device implements acoustic echo cancellation.
return AcousticEchoCanceler.isAvailable();
}
// Returns true if the device supports AEC and it not blacklisted.
public static boolean isAcousticEchoCancelerApproved() {
if (deviceIsBlacklistedForHwAecUsage())
return false;
return isAcousticEchoCancelerSupported();
}
// Information about the current build, taken from system properties.
public static void logDeviceInfo(String tag) {
Logging.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "

View File

@@ -62,6 +62,26 @@ bool AudioDeviceGeneric::BuiltInAECIsEnabled() const {
return false;
}
bool AudioDeviceGeneric::BuiltInAGCIsAvailable() const {
LOG_F(LS_ERROR) << "Not supported on this platform";
return false;
}
int32_t AudioDeviceGeneric::EnableBuiltInAGC(bool enable) {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
bool AudioDeviceGeneric::BuiltInNSIsAvailable() const {
LOG_F(LS_ERROR) << "Not supported on this platform";
return false;
}
int32_t AudioDeviceGeneric::EnableBuiltInNS(bool enable) {
LOG_F(LS_ERROR) << "Not supported on this platform";
return -1;
}
int AudioDeviceGeneric::GetPlayoutAudioParameters(
AudioParameters* params) const {
LOG_F(LS_ERROR) << "Not supported on this platform";

View File

@@ -146,9 +146,13 @@ class AudioDeviceGeneric {
// Android only
virtual bool BuiltInAECIsAvailable() const;
virtual bool BuiltInAGCIsAvailable() const;
virtual bool BuiltInNSIsAvailable() const;
// Windows Core Audio and Android only.
virtual int32_t EnableBuiltInAEC(bool enable);
virtual int32_t EnableBuiltInAGC(bool enable);
virtual int32_t EnableBuiltInNS(bool enable);
// Windows Core Audio only.
virtual bool BuiltInAECIsEnabled() const;

View File

@@ -1869,29 +1869,17 @@ int32_t AudioDeviceModuleImpl::SetLoudspeakerStatus(bool enable)
// GetLoudspeakerStatus
// ----------------------------------------------------------------------------
int32_t AudioDeviceModuleImpl::GetLoudspeakerStatus(bool* enabled) const
{
CHECK_INITIALIZED();
if (_ptrAudioDevice->GetLoudspeakerStatus(*enabled) != 0)
{
return -1;
}
return 0;
}
int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable)
{
int32_t AudioDeviceModuleImpl::GetLoudspeakerStatus(bool* enabled) const {
CHECK_INITIALIZED();
return _ptrAudioDevice->EnableBuiltInAEC(enable);
if (_ptrAudioDevice->GetLoudspeakerStatus(*enabled) != 0) {
return -1;
}
return 0;
}
bool AudioDeviceModuleImpl::BuiltInAECIsEnabled() const
{
CHECK_INITIALIZED_BOOL();
return _ptrAudioDevice->BuiltInAECIsEnabled();
bool AudioDeviceModuleImpl::BuiltInAECIsEnabled() const {
CHECK_INITIALIZED_BOOL();
return _ptrAudioDevice->BuiltInAECIsEnabled();
}
bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const {
@@ -1899,6 +1887,31 @@ bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const {
return _ptrAudioDevice->BuiltInAECIsAvailable();
}
int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable) {
CHECK_INITIALIZED();
return _ptrAudioDevice->EnableBuiltInAEC(enable);
}
bool AudioDeviceModuleImpl::BuiltInAGCIsAvailable() const {
CHECK_INITIALIZED_BOOL();
return _ptrAudioDevice->BuiltInAGCIsAvailable();
}
int32_t AudioDeviceModuleImpl::EnableBuiltInAGC(bool enable) {
CHECK_INITIALIZED();
return _ptrAudioDevice->EnableBuiltInAGC(enable);
}
bool AudioDeviceModuleImpl::BuiltInNSIsAvailable() const {
CHECK_INITIALIZED_BOOL();
return _ptrAudioDevice->BuiltInNSIsAvailable();
}
int32_t AudioDeviceModuleImpl::EnableBuiltInNS(bool enable) {
CHECK_INITIALIZED();
return _ptrAudioDevice->EnableBuiltInNS(enable);
}
int AudioDeviceModuleImpl::GetPlayoutAudioParameters(
AudioParameters* params) const {
return _ptrAudioDevice->GetPlayoutAudioParameters(params);

View File

@@ -182,10 +182,13 @@ class AudioDeviceModuleImpl : public AudioDeviceModule {
int32_t SetLoudspeakerStatus(bool enable) override;
int32_t GetLoudspeakerStatus(bool* enabled) const override;
bool BuiltInAECIsAvailable() const override;
int32_t EnableBuiltInAEC(bool enable) override;
bool BuiltInAECIsEnabled() const override;
bool BuiltInAECIsAvailable() const override;
int32_t EnableBuiltInAEC(bool enable) override;
bool BuiltInAGCIsAvailable() const override;
int32_t EnableBuiltInAGC(bool enable) override;
bool BuiltInNSIsAvailable() const override;
int32_t EnableBuiltInNS(bool enable) override;
int GetPlayoutAudioParameters(AudioParameters* params) const override;
int GetRecordAudioParameters(AudioParameters* params) const override;

View File

@@ -187,18 +187,14 @@ class AudioDeviceModule : public RefCountedModule {
// Only supported on Android.
// TODO(henrika): Make pure virtual after updating Chromium.
virtual bool BuiltInAECIsAvailable() const { return false; }
virtual bool BuiltInAGCIsAvailable() const { return false; }
virtual bool BuiltInNSIsAvailable() const { return false; }
// Enables the built-in AEC. Only supported on Windows and Android.
//
// For usage on Windows (requires Core Audio):
// Must be called before InitRecording(). When enabled:
// 1. StartPlayout() must be called before StartRecording().
// 2. StopRecording() should be called before StopPlayout().
// The reverse order may cause garbage audio to be rendered or the
// capture side to halt until StopRecording() is called.
// Enables the built-in audio effects. Only supported on Android.
// TODO(henrika): Make pure virtual after updating Chromium.
virtual int32_t EnableBuiltInAEC(bool enable) { return -1; }
virtual int32_t EnableBuiltInAGC(bool enable) { return -1; }
virtual int32_t EnableBuiltInNS(bool enable) { return -1; }
// Don't use.
virtual bool BuiltInAECIsEnabled() const { return false; }

View File

@@ -147,6 +147,10 @@ class FakeAudioDeviceModule : public AudioDeviceModule {
virtual bool BuiltInAECIsAvailable() const { return false; }
virtual int32_t EnableBuiltInAEC(bool enable) { return -1; }
virtual bool BuiltInAECIsEnabled() const { return false; }
virtual bool BuiltInAGCIsAvailable() const { return false; }
virtual int32_t EnableBuiltInAGC(bool enable) { return -1; }
virtual bool BuiltInNSIsAvailable() const { return false; }
virtual int32_t EnableBuiltInNS(bool enable) { return -1; }
};
} // namespace webrtc

View File

@@ -91,8 +91,13 @@ class WEBRTC_DLLEXPORT VoEHardware {
virtual int SetPlayoutSampleRate(unsigned int samples_per_sec) = 0;
virtual int PlayoutSampleRate(unsigned int* samples_per_sec) const = 0;
// Queries and controls platform audio effects on Android devices.
virtual bool BuiltInAECIsAvailable() const = 0;
virtual int EnableBuiltInAEC(bool enable) = 0;
virtual bool BuiltInAGCIsAvailable() const = 0;
virtual int EnableBuiltInAGC(bool enable) = 0;
virtual bool BuiltInNSIsAvailable() const = 0;
virtual int EnableBuiltInNS(bool enable) = 0;
protected:
VoEHardware() {}

View File

@@ -480,6 +480,38 @@ int VoEHardwareImpl::EnableBuiltInAEC(bool enable) {
return _shared->audio_device()->EnableBuiltInAEC(enable);
}
bool VoEHardwareImpl::BuiltInAGCIsAvailable() const {
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return false;
}
return _shared->audio_device()->BuiltInAGCIsAvailable();
}
int VoEHardwareImpl::EnableBuiltInAGC(bool enable) {
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
return _shared->audio_device()->EnableBuiltInAGC(enable);
}
bool VoEHardwareImpl::BuiltInNSIsAvailable() const {
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return false;
}
return _shared->audio_device()->BuiltInNSIsAvailable();
}
int VoEHardwareImpl::EnableBuiltInNS(bool enable) {
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
return _shared->audio_device()->EnableBuiltInNS(enable);
}
#endif // WEBRTC_VOICE_ENGINE_HARDWARE_API
} // namespace webrtc

View File

@@ -47,6 +47,10 @@ class VoEHardwareImpl : public VoEHardware {
bool BuiltInAECIsAvailable() const override;
int EnableBuiltInAEC(bool enable) override;
bool BuiltInAGCIsAvailable() const override;
int EnableBuiltInAGC(bool enable) override;
bool BuiltInNSIsAvailable() const override;
int EnableBuiltInNS(bool enable) override;
protected:
VoEHardwareImpl(voe::SharedData* shared);