Fix for "Android audio playout doesn't support non-call media stream"

BUG=webrtc:4767
R=magjed@webrtc.org

Review URL: https://codereview.webrtc.org/1419693004 .

Cr-Commit-Position: refs/heads/master@{#10435}
henrika 2015-10-28 13:06:15 +01:00
parent 83585c9075
commit 6408174cdc
8 changed files with 94 additions and 16 deletions

audio_manager.cc

@@ -71,12 +71,13 @@ AudioManager::AudioManager()
hardware_agc_(false),
hardware_ns_(false),
low_latency_playout_(false),
delay_estimate_in_milliseconds_(0) {
delay_estimate_in_milliseconds_(0),
output_stream_type_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
RTC_CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheAudioParameters",
"(IIZZZZIIJ)V",
"(IIZZZZIIIJ)V",
reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioManager",
@@ -179,12 +180,14 @@ void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
jint output_stream_type,
jlong native_audio_manager) {
webrtc::AudioManager* this_object =
reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
this_object->OnCacheAudioParameters(
env, sample_rate, channels, hardware_aec, hardware_agc, hardware_ns,
low_latency_output, output_buffer_size, input_buffer_size);
low_latency_output, output_buffer_size, input_buffer_size,
output_stream_type);
}
void AudioManager::OnCacheAudioParameters(JNIEnv* env,
@@ -195,7 +198,8 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env,
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size) {
jint input_buffer_size,
jint output_stream_type) {
ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
ALOGD("hardware_aec: %d", hardware_aec);
ALOGD("hardware_agc: %d", hardware_agc);
@@ -205,11 +209,13 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env,
ALOGD("channels: %d", channels);
ALOGD("output_buffer_size: %d", output_buffer_size);
ALOGD("input_buffer_size: %d", input_buffer_size);
ALOGD("output_stream_type: %d", output_stream_type);
RTC_DCHECK(thread_checker_.CalledOnValidThread());
hardware_aec_ = hardware_aec;
hardware_agc_ = hardware_agc;
hardware_ns_ = hardware_ns;
low_latency_playout_ = low_latency_output;
output_stream_type_ = output_stream_type;
// TODO(henrika): add support for stereo output.
playout_parameters_.reset(sample_rate, channels,
static_cast<size_t>(output_buffer_size));
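
For reference, the new outputStreamType argument is what turns the registration signature "(IIZZZZIIJ)V" into "(IIZZZZIIIJ)V". Below is a minimal sketch, not part of the patch (the constant name is invented), decoding the new signature with the standard JNI type codes (I = jint, Z = jboolean, J = jlong, V = void return):

#include <jni.h>

namespace {
// "(IIZZZZIIIJ)V" decodes as:
//   I sampleRate, I channels,
//   Z hardwareAEC, Z hardwareAGC, Z hardwareNS, Z lowLatencyOutput,
//   I outputBufferSize, I inputBufferSize,
//   I outputStreamType,   <-- the argument added by this change
//   J nativeAudioManager,
//   V (void return value).
constexpr char kCacheAudioParametersSignature[] = "(IIZZZZIIIJ)V";
}  // namespace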

audio_manager.h

@@ -93,6 +93,8 @@ class AudioManager {
// webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
int GetDelayEstimateInMilliseconds() const;
int OutputStreamType() const { return output_stream_type_; }
private:
// Called from Java side so we can cache the native audio parameters.
// This method will be called by the WebRtcAudioManager constructor, i.e.
@@ -107,6 +109,7 @@ class AudioManager {
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
jint output_stream_type,
jlong native_audio_manager);
void OnCacheAudioParameters(JNIEnv* env,
jint sample_rate,
@@ -116,7 +119,8 @@ class AudioManager {
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size);
jint input_buffer_size,
jint output_stream_type);
// Stores thread ID in the constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
@@ -155,6 +159,13 @@ class AudioManager {
// device supports low-latency output or not.
int delay_estimate_in_milliseconds_;
// Contains the output stream type provided to this class at construction by
// the AudioManager in Java land. Possible values are:
// - AudioManager.STREAM_VOICE_CALL = 0
// - AudioManager.STREAM_RING = 2
// - AudioManager.STREAM_MUSIC = 3
int output_stream_type_;
// Contains native parameters (e.g. sample rate, channel configuration).
// Set at construction in OnCacheAudioParameters() which is called from
// Java on the same thread as this object is created on.

WebRtcAudioManager.java

@@ -71,6 +71,7 @@ class WebRtcAudioManager {
private int channels;
private int outputBufferSize;
private int inputBufferSize;
private int outputStreamType;
WebRtcAudioManager(Context context, long nativeAudioManager) {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
@@ -84,7 +85,7 @@ class WebRtcAudioManager {
storeAudioParameters();
nativeCacheAudioParameters(
sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS,
lowLatencyOutput, outputBufferSize, inputBufferSize,
lowLatencyOutput, outputBufferSize, inputBufferSize, outputStreamType,
nativeAudioManager);
}
@@ -132,6 +133,8 @@ class WebRtcAudioManager {
getMinOutputFrameSize(sampleRate, channels);
// TODO(henrika): add support for low-latency input.
inputBufferSize = getMinInputFrameSize(sampleRate, channels);
outputStreamType = WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
audioManager.getMode());
}
// Gets the current earpiece state.
@@ -267,5 +270,5 @@ class WebRtcAudioManager {
private native void nativeCacheAudioParameters(
int sampleRate, int channels, boolean hardwareAEC, boolean hardwareAGC,
boolean hardwareNS, boolean lowLatencyOutput, int outputBufferSize,
int inputBufferSize, long nativeAudioManager);
int inputBufferSize, int outputStreamType, long nativeAudioManager);
}

WebRtcAudioRecord.java

@@ -192,6 +192,10 @@ class WebRtcAudioRecord {
Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
try {
// TODO(henrika): the only supported audio source for input is currently
// AudioSource.VOICE_COMMUNICATION. Is there any reason why we should
// support other types, e.g. DEFAULT or MIC? Only reason I can think of
// is if the device does not support VOICE_COMMUNICATION.
audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
sampleRate,
AudioFormat.CHANNEL_IN_MONO,

WebRtcAudioTrack.java

@@ -39,6 +39,7 @@ class WebRtcAudioTrack {
private final Context context;
private final long nativeAudioTrack;
private final AudioManager audioManager;
private final int streamType;
private ByteBuffer byteBuffer;
@@ -141,6 +142,9 @@ class WebRtcAudioTrack {
this.nativeAudioTrack = nativeAudioTrack;
audioManager = (AudioManager) context.getSystemService(
Context.AUDIO_SERVICE);
this.streamType =
WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
audioManager.getMode());
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
@@ -177,7 +181,7 @@ class WebRtcAudioTrack {
// Create an AudioTrack object and initialize its associated audio buffer.
// The size of this buffer determines how long an AudioTrack can play
// before running out of data.
audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL,
audioTrack = new AudioTrack(streamType,
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT,
@@ -189,7 +193,7 @@ class WebRtcAudioTrack {
}
assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
assertTrue(audioTrack.getStreamType() == streamType);
}
private boolean startPlayout() {
@@ -213,14 +217,14 @@ class WebRtcAudioTrack {
return true;
}
/** Get max possible volume index for a phone call audio stream. */
/** Get max possible volume index for the given type of audio stream. */
private int getStreamMaxVolume() {
Logging.d(TAG, "getStreamMaxVolume");
assertTrue(audioManager != null);
return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
return audioManager.getStreamMaxVolume(streamType);
}
/** Set current volume level for a phone call audio stream. */
/** Set current volume level for the given type of audio stream. */
private boolean setStreamVolume(int volume) {
Logging.d(TAG, "setStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
@@ -230,15 +234,15 @@ class WebRtcAudioTrack {
return false;
}
}
audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
audioManager.setStreamVolume(streamType, volume, 0);
return true;
}
/** Get current volume level for a phone call audio stream. */
/** Get current volume level for the given type of audio stream. */
private int getStreamVolume() {
Logging.d(TAG, "getStreamVolume");
assertTrue(audioManager != null);
return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
return audioManager.getStreamVolume(streamType);
}
/** Helper method which throws an exception when an assertion has failed. */

WebRtcAudioUtils.java

@@ -193,5 +193,37 @@ public final class WebRtcAudioUtils {
permission,
Process.myPid(),
Process.myUid()) == PackageManager.PERMISSION_GRANTED;
}
// Convert the provided audio |mode| into the most suitable audio output
// stream type. The stream type is used for creating audio streams and for
// volume changes. It is essential that the mode and type are in line to ensure
// correct behavior. If for example a STREAM_MUSIC type of stream is created
// in a MODE_IN_COMMUNICATION mode, audio will be played out and the volume
// icon will look OK but the actual volume will not be changed when the user
// changes the volume slider.
// TODO(henrika): there is currently no mapping to STREAM_ALARM, STREAM_DTMF,
// or STREAM_NOTIFICATION types since I am unable to see a reason for using
// them. There are only four different modes.
public static int getOutputStreamTypeFromAudioMode(int mode) {
Logging.d(TAG, "getOutputStreamTypeFromAudioMode(mode=" + mode + ")");
switch (mode) {
case AudioManager.MODE_NORMAL:
// The audio stream for music playback.
Logging.d(TAG, "AudioManager.STREAM_MUSIC");
return AudioManager.STREAM_MUSIC;
case AudioManager.MODE_RINGTONE:
// Audio stream for the phone ring.
Logging.d(TAG, "AudioManager.STREAM_RING");
return AudioManager.STREAM_RING;
case AudioManager.MODE_IN_CALL:
case AudioManager.MODE_IN_COMMUNICATION:
// Audio stream for phone calls.
Logging.d(TAG, "AudioManager.STREAM_VOICE_CALL");
return AudioManager.STREAM_VOICE_CALL;
default:
Logging.d(TAG, "AudioManager.USE_DEFAULT_STREAM_TYPE");
return AudioManager.USE_DEFAULT_STREAM_TYPE;
}
}
}

opensles_player.cc

@@ -38,6 +38,7 @@ namespace webrtc {
OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
: audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
stream_type_(audio_manager->OutputStreamType()),
audio_device_buffer_(NULL),
initialized_(false),
playing_(false),
@@ -48,6 +49,9 @@ OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
simple_buffer_queue_(nullptr),
volume_(nullptr) {
ALOGD("ctor%s", GetThreadInfo().c_str());
RTC_DCHECK(stream_type_ == SL_ANDROID_STREAM_VOICE ||
stream_type_ == SL_ANDROID_STREAM_RING ||
stream_type_ == SL_ANDROID_STREAM_MEDIA) << stream_type_;
// Use native audio output parameters provided by the audio manager and
// define the PCM format structure.
pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
@@ -347,7 +351,7 @@ bool OpenSLESPlayer::CreateAudioPlayer() {
false);
// Set audio player configuration to SL_ANDROID_STREAM_VOICE which
// corresponds to android.media.AudioManager.STREAM_VOICE_CALL.
SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
SLint32 stream_type = stream_type_;
RETURN_ON_ERROR(
(*player_config)
->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
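
The hunk above is cut off before the end of the SetConfiguration() call. Below is a minimal sketch, not part of the patch (the helper name is invented), of the complete OpenSL ES call that hands the chosen stream type to the player configuration; it must run before the player is realized:

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

// Passes an Android stream type (e.g. SL_ANDROID_STREAM_MEDIA) to an audio
// player through its configuration interface, keyed by
// SL_ANDROID_KEY_STREAM_TYPE.
SLresult SetStreamType(SLAndroidConfigurationItf player_config,
                       SLint32 stream_type) {
  return (*player_config)
      ->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
                         &stream_type, sizeof(SLint32));
}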

opensles_player.h

@@ -130,6 +130,20 @@ class OpenSLESPlayer {
// AudioManager.
const AudioParameters audio_parameters_;
// Contains the stream type provided to this class at construction by the
// AudioManager. Possible input values are:
// - AudioManager.STREAM_VOICE_CALL = 0
// - AudioManager.STREAM_RING = 2
// - AudioManager.STREAM_MUSIC = 3
// These values are mapped to the corresponding audio playback stream type
// values in the "OpenSL ES domain":
// - SL_ANDROID_STREAM_VOICE <=> STREAM_VOICE_CALL (0)
// - SL_ANDROID_STREAM_RING <=> STREAM_RING (2)
// - SL_ANDROID_STREAM_MEDIA <=> STREAM_MUSIC (3)
// when creating the audio player. See SLES/OpenSLES_AndroidConfiguration.h
// for details.
const int stream_type_;
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
AudioDeviceBuffer* audio_device_buffer_;
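
The comment above relies on the Android and OpenSL ES stream-type constants sharing the same numeric values, which is why the value cached from Java can be passed straight through as an SLint32. Below is a minimal sketch, not part of the patch (the kStream* constants simply mirror the android.media.AudioManager values listed above), that makes the mapping explicit:

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

// Numeric values of the android.media.AudioManager stream types (see the
// comment above); mirrored here only for the compile-time check.
constexpr int kStreamVoiceCall = 0;  // AudioManager.STREAM_VOICE_CALL
constexpr int kStreamRing = 2;       // AudioManager.STREAM_RING
constexpr int kStreamMusic = 3;      // AudioManager.STREAM_MUSIC

static_assert(SL_ANDROID_STREAM_VOICE == kStreamVoiceCall,
              "STREAM_VOICE_CALL must map to SL_ANDROID_STREAM_VOICE");
static_assert(SL_ANDROID_STREAM_RING == kStreamRing,
              "STREAM_RING must map to SL_ANDROID_STREAM_RING");
static_assert(SL_ANDROID_STREAM_MEDIA == kStreamMusic,
              "STREAM_MUSIC must map to SL_ANDROID_STREAM_MEDIA");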