am 90d78fa4: am 95546693: Merge "Add AudioAttributes support in the TTS." into lmp-dev

* commit '90d78fa4ec5326d0cacfa5221c6675bc28b8c9d3':
  Add AudioAttributes support in the TTS.
This commit is contained in:
Przemyslaw Szczepaniak
2014-07-17 18:23:39 +00:00
committed by Android Git Automerger
5 changed files with 82 additions and 24 deletions

View File

@@ -27257,6 +27257,7 @@ package android.speech.tts {
method public deprecated int playEarcon(java.lang.String, int, java.util.HashMap<java.lang.String, java.lang.String>);
method public int playSilence(long, int, java.util.HashMap<java.lang.String, java.lang.String>, java.lang.String);
method public deprecated int playSilence(long, int, java.util.HashMap<java.lang.String, java.lang.String>);
method public int setAudioAttributes(android.media.AudioAttributes);
method public deprecated int setEngineByPackageName(java.lang.String);
method public int setLanguage(java.util.Locale);
method public deprecated int setOnUtteranceCompletedListener(android.speech.tts.TextToSpeech.OnUtteranceCompletedListener);

View File

@@ -54,7 +54,11 @@ class AudioPlaybackQueueItem extends PlaybackQueueItem {
final UtteranceProgressDispatcher dispatcher = getDispatcher();
dispatcher.dispatchOnStart();
mPlayer = MediaPlayer.create(mContext, mUri);
int sessionId = mAudioParams.mSessionId;
mPlayer = MediaPlayer.create(
mContext, mUri, null, mAudioParams.mAudioAttributes,
sessionId > 0 ? sessionId : AudioSystem.AUDIO_SESSION_ALLOCATE);
if (mPlayer == null) {
dispatcher.dispatchOnError(TextToSpeech.ERROR_OUTPUT);
return;
@@ -76,11 +80,8 @@ class AudioPlaybackQueueItem extends PlaybackQueueItem {
mDone.open();
}
});
mPlayer.setAudioStreamType(mAudioParams.mStreamType);
setupVolume(mPlayer, mAudioParams.mVolume, mAudioParams.mPan);
if (mAudioParams.mSessionId != AudioSystem.AUDIO_SESSION_ALLOCATE) {
mPlayer.setAudioSessionId(mAudioParams.mSessionId);
}
mPlayer.start();
mDone.block();
finish();

View File

@@ -2,6 +2,7 @@
package android.speech.tts;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioTrack;
import android.speech.tts.TextToSpeechService.AudioOutputParams;
@@ -214,9 +215,14 @@ class BlockingAudioTrack {
= AudioTrack.getMinBufferSize(mSampleRateInHz, channelConfig, mAudioFormat);
int bufferSizeInBytes = Math.max(MIN_AUDIO_BUFFER_SIZE, minBufferSizeInBytes);
AudioTrack audioTrack = new AudioTrack(mAudioParams.mStreamType, mSampleRateInHz,
channelConfig, mAudioFormat, bufferSizeInBytes, AudioTrack.MODE_STREAM,
AudioFormat audioFormat = (new AudioFormat.Builder())
.setChannelMask(channelConfig)
.setEncoding(mAudioFormat)
.setSampleRate(mSampleRateInHz).build();
AudioTrack audioTrack = new AudioTrack(mAudioParams.mAudioAttributes,
audioFormat, bufferSizeInBytes, AudioTrack.MODE_STREAM,
mAudioParams.mSessionId);
if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
Log.w(TAG, "Unable to create audio track.");
audioTrack.release();

View File

@@ -22,6 +22,7 @@ import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.ServiceConnection;
import android.media.AudioAttributes;
import android.media.AudioManager;
import android.net.Uri;
import android.os.AsyncTask;
@@ -523,6 +524,17 @@ public class TextToSpeech {
*/
public static final String KEY_PARAM_STREAM = "streamType";
/**
* Parameter key to specify the audio attributes to be used when
* speaking text or playing back a file. The value should be set
* using {@link TextToSpeech#setAudioAttributes(AudioAttributes)}.
*
* @see TextToSpeech#speak(String, int, HashMap)
* @see TextToSpeech#playEarcon(String, int, HashMap)
* @hide
*/
public static final String KEY_PARAM_AUDIO_ATTRIBUTES = "audioAttributes";
/**
* Parameter key to identify an utterance in the
* {@link TextToSpeech.OnUtteranceCompletedListener} after text has been
@@ -1356,6 +1368,25 @@ public class TextToSpeech {
return ERROR;
}
/**
 * Sets the audio attributes to be used when speaking text or playing
 * back a file.
 *
 * @param audioAttributes Valid AudioAttributes instance.
 *
 * @return {@link #ERROR} or {@link #SUCCESS}.
 */
public int setAudioAttributes(AudioAttributes audioAttributes) {
    // Reject null up front so callers get an explicit failure instead of
    // silently clearing the previously configured attributes.
    if (audioAttributes == null) {
        return ERROR;
    }
    // Guard the shared parameter bundle; it is read by speak()/playEarcon()
    // on other threads.
    synchronized (mStartLock) {
        mParams.putParcelable(Engine.KEY_PARAM_AUDIO_ATTRIBUTES, audioAttributes);
    }
    return SUCCESS;
}
/**
* @return the engine currently in use by this TextToSpeech instance.
* @hide

View File

@@ -17,7 +17,7 @@ package android.speech.tts;
import android.app.Service;
import android.content.Intent;
import android.media.AudioManager;
import android.media.AudioAttributes;
import android.media.AudioSystem;
import android.net.Uri;
import android.os.Binder;
@@ -607,12 +607,6 @@ public abstract class TextToSpeechService extends Service {
*/
public final int mSessionId;
/**
* Audio stream type. Must be one of the STREAM_ constants defined in
* {@link android.media.AudioManager}.
*/
public final int mStreamType;
/**
* Volume, in the range [0.0f, 1.0f]. The default value is
* {@link TextToSpeech.Engine#DEFAULT_VOLUME} (1.0f).
@@ -625,42 +619,62 @@ public abstract class TextToSpeechService extends Service {
*/
public final float mPan;
/**
* Audio attributes, set by {@link TextToSpeech#setAudioAttributes}
* or created from the value of {@link TextToSpeech.Engine#KEY_PARAM_STREAM}.
*/
public final AudioAttributes mAudioAttributes;
/** Create AudioOutputParams with default values */
AudioOutputParams() {
mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
mStreamType = Engine.DEFAULT_STREAM;
mVolume = Engine.DEFAULT_VOLUME;
mPan = Engine.DEFAULT_PAN;
mAudioAttributes = null;
}
AudioOutputParams(int sessionId, int streamType, float volume, float pan) {
AudioOutputParams(int sessionId, float volume, float pan,
AudioAttributes audioAttributes) {
mSessionId = sessionId;
mStreamType = streamType;
mVolume = volume;
mPan = pan;
mAudioAttributes = audioAttributes;
}
/** Create AudioOutputParams from a {@link SynthesisRequest#getParams()} bundle */
static AudioOutputParams createFromV1ParamsBundle(Bundle paramsBundle) {
static AudioOutputParams createFromV1ParamsBundle(Bundle paramsBundle,
boolean isSpeech) {
if (paramsBundle == null) {
return new AudioOutputParams();
}
AudioAttributes audioAttributes =
(AudioAttributes) paramsBundle.getParcelable(
Engine.KEY_PARAM_AUDIO_ATTRIBUTES);
if (audioAttributes == null) {
int streamType = paramsBundle.getInt(
Engine.KEY_PARAM_STREAM, Engine.DEFAULT_STREAM);
audioAttributes = (new AudioAttributes.Builder())
.setLegacyStreamType(streamType)
.setContentType((isSpeech ?
AudioAttributes.CONTENT_TYPE_SPEECH :
AudioAttributes.CONTENT_TYPE_SONIFICATION))
.build();
}
return new AudioOutputParams(
paramsBundle.getInt(
Engine.KEY_PARAM_SESSION_ID,
AudioSystem.AUDIO_SESSION_ALLOCATE),
paramsBundle.getInt(
Engine.KEY_PARAM_STREAM,
Engine.DEFAULT_STREAM),
paramsBundle.getFloat(
Engine.KEY_PARAM_VOLUME,
Engine.DEFAULT_VOLUME),
paramsBundle.getFloat(
Engine.KEY_PARAM_PAN,
Engine.DEFAULT_PAN));
Engine.DEFAULT_PAN),
audioAttributes);
}
}
@@ -832,7 +846,7 @@ public abstract class TextToSpeechService extends Service {
}
AudioOutputParams getAudioParams() {
return AudioOutputParams.createFromV1ParamsBundle(mParams);
return AudioOutputParams.createFromV1ParamsBundle(mParams, true);
}
}
@@ -1005,6 +1019,11 @@ public abstract class TextToSpeechService extends Service {
public String getUtteranceId() {
return getStringParam(mParams, Engine.KEY_PARAM_UTTERANCE_ID, null);
}
@Override
AudioOutputParams getAudioParams() {
    // isSpeech=false: earcon/audio playback is sonification, not spoken
    // text, so the derived default AudioAttributes use
    // CONTENT_TYPE_SONIFICATION rather than CONTENT_TYPE_SPEECH.
    return AudioOutputParams.createFromV1ParamsBundle(mParams, false);
}
}
private class SilenceSpeechItem extends UtteranceSpeechItem {