Merge change 25366 into eclair

* changes:
  do not merge.  Revert "do not merge.  Remove voicesearch-specific functionality from framework."
This commit is contained in:
Android (Google) Code Review
2009-09-17 11:21:47 -04:00
6 changed files with 439 additions and 0 deletions

View File

@@ -126,6 +126,8 @@ LOCAL_SRC_FILES += \
core/java/android/view/IWindow.aidl \
core/java/android/view/IWindowManager.aidl \
core/java/android/view/IWindowSession.aidl \
core/java/android/speech/IRecognitionListener.aidl \
core/java/android/speech/IRecognitionService.aidl \
core/java/android/speech/tts/ITts.aidl \
core/java/android/speech/tts/ITtsCallback.aidl \
core/java/com/android/internal/app/IBatteryStats.aidl \

View File

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.os.Bundle;
import android.speech.RecognitionResult;
/**
 * Listener for speech recognition events, used with RecognitionService.
 * This gives you both the final recognition results, as well as various
 * intermediate events that can be used to show visual feedback to the user.
 * {@hide}
 */
interface IRecognitionListener {
/**
 * Called when the endpointer is ready for the user to start speaking.
 *
 * @param noiseParams parameters describing the detected audio environment.
 *        NOTE(review): presumably keyed by the Bundle constants declared in
 *        RecognitionServiceUtil ("NoiseLevel", "SignalNoiseRatio") -- confirm
 *        against the service implementation.
 */
void onReadyForSpeech(in Bundle noiseParams);
/** The user has started to speak. */
void onBeginningOfSpeech();
/**
 * The sound level in the audio stream has changed.
 *
 * @param rmsdB the new RMS sound level.
 */
void onRmsChanged(in float rmsdB);
/**
 * More sound has been received. Buffer is a byte buffer containing
 * a sequence of 16-bit shorts.
 *
 * @param buffer the raw audio received so far.
 */
void onBufferReceived(in byte[] buffer);
/** Called after the user stops speaking. */
void onEndOfSpeech();
/**
 * A network or recognition error occurred. The code is defined in
 * {@link android.speech.RecognitionResult} (e.g. NETWORK_TIMEOUT,
 * SERVER_ERROR, NO_MATCH, SERVICE_BUSY).
 *
 * @param error one of the status constants on RecognitionResult.
 */
void onError(in int error);
/**
 * Called when recognition results are ready.
 * @param results: an ordered list of the most likely results (N-best list).
 * @param key: a key associated with the results. The same results can
 * be retrieved asynchronously later using the key, if available
 * (see IRecognitionService.getRecognitionResults).
 */
void onResults(in List<RecognitionResult> results, long key);
}

View File

@@ -0,0 +1,37 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.content.Intent;
import android.speech.IRecognitionListener;
import android.speech.RecognitionResult;
// A Service interface to speech recognition. Call startListening when
// you want to begin capturing audio; RecognitionService will automatically
// determine when the user has finished speaking, stream the audio to the
// recognition servers, and notify you when results are ready.
/** {@hide} */
interface IRecognitionService {
// Start listening for speech. Can only call this from one thread at once.
// see RecognizerIntent.java for constants used to specify the intent.
// Events are delivered to the supplied listener (ready / begin / rms /
// end-of-speech / error / results).
void startListening(in Intent recognizerIntent,
in IRecognitionListener listener);
// Retrieve results previously delivered through
// IRecognitionListener.onResults, identified by the key that accompanied
// those results.
List<RecognitionResult> getRecognitionResults(in long key);
// Cancel any recognition currently in progress.
void cancel();
}

View File

@@ -0,0 +1,19 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
// Declares android.speech.RecognitionResult as a Parcelable so it can be
// passed through the IRecognitionListener / IRecognitionService AIDL
// interfaces; the Java implementation lives in RecognitionResult.java.
parcelable RecognitionResult;

View File

@@ -0,0 +1,220 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.os.Parcel;
import android.os.Parcelable;
/**
 * RecognitionResult is a passive, immutable object that stores a single
 * recognized query and its search result. Instances are produced through the
 * static factory methods (one per result type) and can cross process
 * boundaries via the Parcelable plumbing at the bottom of the class.
 *
 * TODO: Revisit and improve this class, reconciling the different types of actions and
 * the different ways they are represented. Maybe we should have a separate result object
 * for each type, and put them (type/value) in bundle?
 * {@hide}
 */
public class RecognitionResult implements Parcelable {
    // Status of the recognize request (reported via IRecognitionListener.onError).
    public static final int NETWORK_TIMEOUT = 1; // Network operation timed out.
    public static final int NETWORK_ERROR = 2; // Other network related errors.
    public static final int AUDIO_ERROR = 3; // Audio recording error.
    public static final int SERVER_ERROR = 4; // Server sends error status.
    public static final int CLIENT_ERROR = 5; // Other client side errors.
    public static final int SPEECH_TIMEOUT = 6; // No speech input
    public static final int NO_MATCH = 7; // No recognition result matched.
    public static final int SERVICE_BUSY = 8; // RecognitionService busy.

    // Type of the recognition result; the value of mResultType.
    public static final int RAW_RECOGNITION_RESULT = 0;
    public static final int WEB_SEARCH_RESULT = 1;
    public static final int CONTACT_RESULT = 2;
    public static final int ACTION_RESULT = 3;

    /** Result type; one of the *_RESULT constants above. */
    public final int mResultType;

    /**
     * The recognized string when mResultType is WEB_SEARCH_RESULT. The name of
     * the contact when mResultType is CONTACT_RESULT. The relevant query when
     * mResultType is ACTION_RESULT.
     */
    public final String mText;

    /**
     * The HTML result page for the query. If this is null, then the application
     * must use the url field to get the HTML result page.
     */
    public final String mHtml;

    /**
     * The url to get the result page for the query string. The application must
     * use this url instead of performing the search with the query.
     */
    public final String mUrl;

    /** Phone number type. Only meaningful when mResultType == CONTACT_RESULT. */
    public final int mPhoneType;

    /** Action type. Only meaningful when mResultType == ACTION_RESULT. */
    public final int mAction;

    /**
     * Whether a contact recognition result included a command to "call". Only
     * meaningful when mResultType == CONTACT_RESULT.
     */
    public final boolean mCallAction;

    /**
     * A factory method to create a raw RecognitionResult
     *
     * @param sentence the recognized text.
     */
    public static RecognitionResult newRawRecognitionResult(String sentence) {
        return new RecognitionResult(RAW_RECOGNITION_RESULT, sentence, null, null);
    }

    /**
     * A factory method to create a RecognitionResult for contacts.
     *
     * @param contact the contact name.
     * @param phoneType the phone type.
     * @param callAction whether this result included a command to "call", or
     *        just the contact name.
     */
    public static RecognitionResult newContactResult(String contact, int phoneType,
            boolean callAction) {
        return new RecognitionResult(CONTACT_RESULT, contact, phoneType, callAction);
    }

    /**
     * A factory method to create a RecognitionResult for a web search query.
     *
     * @param query the query string.
     * @param html the html page of the search result.
     * @param url the url that performs the search with the query.
     */
    public static RecognitionResult newWebResult(String query, String html, String url) {
        return new RecognitionResult(WEB_SEARCH_RESULT, query, html, url);
    }

    /**
     * A factory method to create a RecognitionResult for an action.
     *
     * @param action the action type
     * @param query the query string associated with that action.
     */
    public static RecognitionResult newActionResult(int action, String query) {
        return new RecognitionResult(ACTION_RESULT, action, query);
    }

    // Used by newActionResult: carries an action type plus its query.
    private RecognitionResult(int resultType, int actionType, String queryText) {
        mResultType = resultType;
        mText = queryText;
        mHtml = null;
        mUrl = null;
        mPhoneType = -1;
        mAction = actionType;
        mCallAction = false;
    }

    // Used by newRawRecognitionResult and newWebResult.
    private RecognitionResult(int resultType, String queryText, String htmlPage,
            String resultUrl) {
        mResultType = resultType;
        mText = queryText;
        mHtml = htmlPage;
        mUrl = resultUrl;
        mPhoneType = -1;
        mAction = -1;
        mCallAction = false;
    }

    // Used by newContactResult.
    private RecognitionResult(int resultType, String contactName, int contactPhoneType,
            boolean isCallAction) {
        mResultType = resultType;
        mText = contactName;
        mHtml = null;
        mUrl = null;
        mPhoneType = contactPhoneType;
        mAction = -1;
        mCallAction = isCallAction;
    }

    // Reconstructs an instance from a parcel; field order must match
    // writeToParcel exactly.
    private RecognitionResult(Parcel source) {
        mResultType = source.readInt();
        mText = source.readString();
        mHtml = source.readString();
        mUrl = source.readString();
        mPhoneType = source.readInt();
        mAction = source.readInt();
        mCallAction = (source.readInt() == 1);
    }

    /** Standard Parcelable CREATOR. */
    public static final Parcelable.Creator<RecognitionResult> CREATOR =
            new Parcelable.Creator<RecognitionResult>() {
                public RecognitionResult createFromParcel(Parcel source) {
                    return new RecognitionResult(source);
                }

                public RecognitionResult[] newArray(int size) {
                    return new RecognitionResult[size];
                }
            };

    public void writeToParcel(Parcel dest, int flags) {
        dest.writeInt(mResultType);
        dest.writeString(mText);
        dest.writeString(mHtml);
        dest.writeString(mUrl);
        dest.writeInt(mPhoneType);
        dest.writeInt(mAction);
        // Booleans are marshalled as 0/1 ints; mirrored in the Parcel ctor.
        final int callFlag;
        if (mCallAction) {
            callFlag = 1;
        } else {
            callFlag = 0;
        }
        dest.writeInt(callFlag);
    }

    @Override
    public String toString() {
        // Indexed by mResultType, which the private constructors constrain
        // to the *_RESULT constants (0-3).
        final String[] typeNames = {
            "RAW", "WEB", "CONTACT", "ACTION"
        };
        StringBuilder sb = new StringBuilder("[type=");
        sb.append(typeNames[mResultType]);
        sb.append(", text=").append(mText);
        sb.append(", mUrl=").append(mUrl);
        sb.append(", html=").append(mHtml);
        sb.append(", mAction=").append(mAction);
        sb.append(", mCallAction=").append(mCallAction);
        sb.append("]");
        return sb.toString();
    }

    public int describeContents() {
        // no special description
        return 0;
    }
}

View File

@@ -0,0 +1,101 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.speech;
import android.content.ComponentName;
import android.content.Intent;
import android.content.ServiceConnection;
import android.os.Bundle;
import android.os.IBinder;
import android.os.RemoteException;
import android.speech.RecognitionResult;
import android.util.Log;
import java.util.List;
/**
 * Utils for Google's network-based speech recognizer, which lets you perform
 * speech-to-text translation through RecognitionService. IRecognitionService
 * and IRecognitionListener are the core interfaces; you begin recognition
 * through IRecognitionService and subscribe to callbacks about when the user
 * stopped speaking, results come in, errors, etc. through IRecognitionListener.
 * RecognitionServiceUtil includes default IRecognitionListener and
 * ServiceConnection implementations to reduce the amount of boilerplate.
 *
 * The Service provides no user interface. See RecognitionActivity if you
 * want the standard voice search UI.
 *
 * Below is a small skeleton of how to use the recognizer:
 *
 * ServiceConnection conn = new RecognitionServiceUtil.Connection();
 * mContext.bindService(RecognitionServiceUtil.sDefaultIntent,
 *     conn, Context.BIND_AUTO_CREATE);
 * IRecognitionListener listener = new RecognitionServiceUtil.NullListener() {
 *     public void onResults(List&lt;RecognitionResult&gt; results, long key) {
 *         // Do something with recognition transcripts
 *     }
 * }
 *
 * // Must wait for conn.mService to be populated, then call below
 * conn.mService.startListening(null, listener);
 *
 * {@hide}
 */
public class RecognitionServiceUtil {
    /** Default intent used to bind to the speech recognition service. */
    public static final Intent sDefaultIntent = new Intent(
            RecognizerIntent.ACTION_RECOGNIZE_SPEECH);

    // Recognize request parameters
    public static final String USE_LOCATION = "useLocation";
    public static final String CONTACT_AUTH_TOKEN = "contactAuthToken";

    // Bundle keys. NOTE(review): presumably used in the noiseParams Bundle
    // delivered to IRecognitionListener.onReadyForSpeech -- confirm with the
    // service implementation.
    public static final String NOISE_LEVEL = "NoiseLevel";
    public static final String SIGNAL_NOISE_RATIO = "SignalNoiseRatio";

    // Static utility class; not instantiable.
    private RecognitionServiceUtil() {}

    /**
     * IRecognitionListener which does nothing in response to recognition
     * callbacks. You can subclass from this and override only the methods
     * whose events you want to respond to.
     */
    public static class NullListener extends IRecognitionListener.Stub {
        public void onReadyForSpeech(Bundle bundle) {}
        public void onBeginningOfSpeech() {}
        public void onRmsChanged(float rmsdB) {}
        public void onBufferReceived(byte[] buf) {}
        public void onEndOfSpeech() {}
        public void onError(int error) {}
        public void onResults(List<RecognitionResult> results, long key) {}
    }

    /**
     * Basic ServiceConnection which just records the mService variable.
     * Both callbacks are synchronized so that writes to mService are
     * consistently guarded; previously only onServiceConnected was
     * synchronized, leaving the disconnect-time clear unguarded.
     */
    public static class Connection implements ServiceConnection {
        public IRecognitionService mService;

        public synchronized void onServiceConnected(ComponentName name, IBinder service) {
            mService = IRecognitionService.Stub.asInterface(service);
        }

        public synchronized void onServiceDisconnected(ComponentName name) {
            mService = null;
        }
    }
}