 * &lt;{@link android.R.styleable#RecognitionService recognition-service}&gt; or
 * &lt;{@link android.R.styleable#RecognitionService on-device-recognition-service}&gt; tag.
*/
public static final String SERVICE_META_DATA = "android.speech";

/** Log messages identifier */
private static final String TAG = "RecognitionService";

/** Debugging flag */
private static final boolean DBG = false;

/** Default session cap; see {@link #getMaxConcurrentSessionsCount()} — 1 means no concurrency. */
private static final int DEFAULT_MAX_CONCURRENT_SESSIONS_COUNT = 1;
private final Map<IBinder, SessionState> mSessions = new HashMap<>(); // TODO(review): declaration garbled in source — verify type and initializer against the original file.

/**
 * Notifies the service that it should start listening for speech.
 *
 * <p>If you are recognizing speech from the microphone, in this callback you
 * should create an attribution context for the caller such that when you access
 * the mic the caller would be properly blamed (and their permission checked in
 * the process) for accessing the microphone and that you served as a proxy for
 * this sensitive data (and your permissions would be checked in the process).
 * You should also open the mic in this callback via the attribution context
 * and close the mic before returning the recognized result. If you don't do
 * that then the caller would be blamed and you as being a proxy as well as you
 * would get one more blame on yourself when you open the microphone.
 *
* Context attributionContext = context.createContext(new ContextParams.Builder()
* .setNextAttributionSource(callback.getCallingAttributionSource())
* .build());
*
* AudioRecord recorder = AudioRecord.Builder()
* .setContext(attributionContext);
* . . .
* .build();
*
* recorder.startRecording()
*
*
 * @param recognizerIntent contains parameters for the recognition to be performed. The intent
 *        may also contain optional extras, see {@link RecognizerIntent}. If these values are
 *        not set explicitly, default values should be used by the recognizer.
 * @param listener that will receive the service's callbacks
 */
protected abstract void onStartListening(Intent recognizerIntent, Callback listener);
/**
 * Notifies the service that it should cancel the speech recognition.
 *
 * @param listener the callback associated with the recognition session to cancel
 */
protected abstract void onCancel(Callback listener);
/**
 * Notifies the service that it should stop listening for speech. Speech captured so far should
 * be recognized as if the user had stopped speaking at this point. This method is only called
 * if the application calls it explicitly.
 *
 * @param listener the callback associated with the recognition session to stop
 */
protected abstract void onStopListening(Callback listener);
/**
 * Queries the service on whether it would support a {@link #onStartListening(Intent, Callback)}
 * for the same {@code recognizerIntent}.
 *
 * <p>The service will notify the caller about the level of support or error via
 * {@link SupportCallback}.
 *
 * <p>If the service does not offer the support check it will notify the caller with
 * {@link SpeechRecognizer#ERROR_CANNOT_CHECK_SUPPORT}.
 *
 * @param recognizerIntent contains parameters for the recognition to be performed
 * @param supportCallback receives the level of support or an error code
 */
public void onCheckRecognitionSupport(
        @NonNull Intent recognizerIntent,
        @NonNull SupportCallback supportCallback) {
    if (DBG) {
        Log.i(TAG, String.format("#onSupports [%s]", recognizerIntent));
    }
    // Default implementation: the service does not offer support checks.
    supportCallback.onError(SpeechRecognizer.ERROR_CANNOT_CHECK_SUPPORT);
}

/**
 * Queries the service on whether it would support a {@link #onStartListening(Intent, Callback)}
 * for the same {@code recognizerIntent}.
 *
The service will notify the caller about the level of support or error via * {@link SupportCallback}. * *
If the service does not offer the support check it will notify the caller with * {@link SpeechRecognizer#ERROR_CANNOT_CHECK_SUPPORT}. * *
Provides the calling AttributionSource to the service implementation so that permissions * and bandwidth could be correctly blamed.
*/ public void onCheckRecognitionSupport( @NonNull Intent recognizerIntent, @NonNull AttributionSource attributionSource, @NonNull SupportCallback supportCallback) { onCheckRecognitionSupport(recognizerIntent, supportCallback); } /** * Requests the download of the recognizer support for {@code recognizerIntent}. */ public void onTriggerModelDownload(@NonNull Intent recognizerIntent) { if (DBG) { Log.i(TAG, String.format("#downloadModel [%s]", recognizerIntent)); } } /** * Requests the download of the recognizer support for {@code recognizerIntent}. * *Provides the calling AttributionSource to the service implementation so that permissions * and bandwidth could be correctly blamed.
*/ public void onTriggerModelDownload( @NonNull Intent recognizerIntent, @NonNull AttributionSource attributionSource) { onTriggerModelDownload(recognizerIntent); } /** * Requests the download of the recognizer support for {@code recognizerIntent}. * *Provides the calling {@link AttributionSource} to the service implementation so that * permissions and bandwidth could be correctly blamed. * *
Client will receive the progress updates via the given {@link ModelDownloadListener}: * *
 * The default value is 1, meaning concurrency should be enabled by overriding this method.
 * (Return a value greater than 1 from the override to allow concurrent sessions.)
 */
public int getMaxConcurrentSessionsCount() {
return DEFAULT_MAX_CONCURRENT_SESSIONS_COUNT;
}
/**
 * This class receives callbacks from the speech recognition service and forwards them to the
 * user. An instance of this class is passed to the
 * {@link RecognitionService#onStartListening(Intent, Callback)} method. Recognizers may call
 * these methods on any thread.
 */
public class Callback {
// Binder-side listener in the client app; every callback below is forwarded to it.
private final IRecognitionListener mListener;
// Permission identity of the app that started this recognition session.
@NonNull private final AttributionSource mCallingAttributionSource;
// Lazily created by getAttributionContextForCaller(); null until first requested.
@Nullable private Context mAttributionContext;
// NOTE(review): not written anywhere in this chunk — presumably set elsewhere in the
// full file; verify before relying on it.
private boolean mAttributionContextCreated;
private Callback(IRecognitionListener listener,
@NonNull AttributionSource attributionSource) {
mListener = listener;
mCallingAttributionSource = attributionSource;
}
/**
 * The service should call this method when the user has started to speak.
 *
 * @throws RemoteException if the client process is no longer reachable
 */
public void beginningOfSpeech() throws RemoteException {
mListener.onBeginningOfSpeech();
}
/**
 * The service should call this method when sound has been received. The purpose of this
 * function is to allow giving feedback to the user regarding the captured audio.
 *
 * @param buffer a buffer containing a sequence of big-endian 16-bit integers representing a
 *        single channel audio stream. The sample rate is implementation dependent.
 * @throws RemoteException if the client process is no longer reachable
 */
public void bufferReceived(byte[] buffer) throws RemoteException {
mListener.onBufferReceived(buffer);
}
/**
 * The service should call this method after the user stops speaking.
 *
 * @throws RemoteException if the client process is no longer reachable
 */
public void endOfSpeech() throws RemoteException {
mListener.onEndOfSpeech();
}
/**
 * The service should call this method when a network or recognition error occurred.
 *
 * @param error code is defined in {@link SpeechRecognizer}
 * @throws RemoteException if the client process is no longer reachable
 */
public void error(@SpeechRecognizer.RecognitionError int error) throws RemoteException {
// Post the internal reset for this listener before delivering the error to the client;
// MSG_RESET is handled elsewhere in this file — presumably it tears down the session.
Message.obtain(mHandler, MSG_RESET, mListener).sendToTarget();
mListener.onError(error);
}
/**
 * The service should call this method when partial recognition results are available. This
 * method can be called at any time between {@link #beginningOfSpeech()} and
 * {@link #results(Bundle)} when partial results are ready. This method may be called zero,
 * one or multiple times for each call to {@link SpeechRecognizer#startListening(Intent)},
 * depending on the speech recognition service implementation.
 *
 * @param partialResults the returned results. To retrieve the results in
 *        ArrayList&lt;String&gt; format use {@link Bundle#getStringArrayList(String)} with
 *        {@link SpeechRecognizer#RESULTS_RECOGNITION} as a parameter
 * @throws RemoteException if the client process is no longer reachable
 */
public void partialResults(Bundle partialResults) throws RemoteException {
mListener.onPartialResults(partialResults);
}
/**
 * The service should call this method when the endpointer is ready for the user to start
 * speaking.
 *
 * @param params parameters set by the recognition service. Reserved for future use.
 * @throws RemoteException if the client process is no longer reachable
 */
public void readyForSpeech(Bundle params) throws RemoteException {
mListener.onReadyForSpeech(params);
}
/**
 * The service should call this method when language detection (and possibly language
 * switching) results are available.
 *
 * <p>NOTE(review): the original javadoc here was fused with documentation for a
 * {@code results(Bundle)} method that is not present in this chunk of the file — verify
 * against the full source.
 *
 * @param results the returned language detection results.
 *        To retrieve the most confidently detected language IETF tag
 *        (as defined by BCP 47, e.g., "en-US", "de-DE"),
 *        use {@link Bundle#getString(String)}
 *        with {@link SpeechRecognizer#DETECTED_LANGUAGE} as the parameter.
 *        To retrieve the language detection confidence level represented by a value
 *        prefixed by {@code LANGUAGE_DETECTION_CONFIDENCE_LEVEL_} defined in
 *        {@link SpeechRecognizer}, use {@link Bundle#getInt(String)} with
 *        {@link SpeechRecognizer#LANGUAGE_DETECTION_CONFIDENCE_LEVEL} as the parameter.
 *        To retrieve the alternative locales for the same language
 *        retrieved by the key {@link SpeechRecognizer#DETECTED_LANGUAGE},
 *        use {@link Bundle#getStringArrayList(String)}
 *        with {@link SpeechRecognizer#TOP_LOCALE_ALTERNATIVES} as the parameter.
 *        To retrieve the language switching results represented by a value
 *        prefixed by {@code LANGUAGE_SWITCH_RESULT_}
 *        and defined in {@link SpeechRecognizer}, use {@link Bundle#getInt(String)}
 *        with {@link SpeechRecognizer#LANGUAGE_SWITCH_RESULT} as the parameter.
 */
@SuppressLint("CallbackMethodName") // For consistency with existing methods.
public void languageDetection(@NonNull Bundle results) {
try {
mListener.onLanguageDetection(results);
} catch (RemoteException e) {
// Binder failure while calling into the client; rethrow as a runtime error.
throw e.rethrowFromSystemServer();
}
}
/**
 * Return the Linux uid assigned to the process that sent you the current transaction that
 * is being processed. This is obtained from {@link Binder#getCallingUid()}.
 */
public int getCallingUid() {
return mCallingAttributionSource.getUid();
}
/**
 * Gets the permission identity of the calling app. If you want to attribute
 * the mic access to the calling app you can create an attribution context
 * via {@link android.content.Context#createContext(android.content.ContextParams)}
 * and passing this identity to {@link
 * android.content.ContextParams.Builder#setNextAttributionSource(AttributionSource)}.
 *
 * @return The permission identity of the calling app.
 *
 * @see android.content.ContextParams.Builder#setNextAttributionSource(
 *      AttributionSource)
 */
@SuppressLint("CallbackMethodName")
@NonNull
public AttributionSource getCallingAttributionSource() {
return mCallingAttributionSource;
}
/**
 * Returns a context whose next attribution source is the calling app, creating it lazily
 * on first use, so that sensitive-data access (e.g. the microphone) performed through it
 * is blamed on the caller as well as this service.
 */
@NonNull Context getAttributionContextForCaller() {
if (mAttributionContext == null) {
mAttributionContext = createContext(new ContextParams.Builder()
.setNextAttributionSource(mCallingAttributionSource)
.build());
}
return mAttributionContext;
}
}
/**
 * Relays the outcome of a recognition-support check from the service implementation back to
 * the client. An instance of this class is handed to
 * {@link RecognitionService#onCheckRecognitionSupport(Intent, SupportCallback)}; the service
 * implementation may invoke its methods from any thread.
 */
public static class SupportCallback {
    // Binder-side callback in the client app that ultimately receives the result.
    private final IRecognitionSupportCallback mCallback;

    private SupportCallback(IRecognitionSupportCallback callback) {
        mCallback = callback;
    }

    /** The service should call this method to notify the caller about the level of support. */
    public void onSupportResult(@NonNull RecognitionSupport recognitionSupport) {
        try {
            mCallback.onSupportResult(recognitionSupport);
        } catch (RemoteException ex) {
            // Binder failure while calling into the client; rethrow as a runtime error.
            throw ex.rethrowFromSystemServer();
        }
    }

    /**
     * The service should call this method when an error occurred and can't satisfy the support
     * request.
     *
     * @param errorCode code is defined in {@link SpeechRecognizer}
     */
    public void onError(@SpeechRecognizer.RecognitionError int errorCode) {
        try {
            mCallback.onError(errorCode);
        } catch (RemoteException ex) {
            // Binder failure while calling into the client; rethrow as a runtime error.
            throw ex.rethrowFromSystemServer();
        }
    }
}
/** Binder of the recognition service. */
private static final class RecognitionServiceBinder extends IRecognitionService.Stub {
private final WeakReference
*