/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 package android.speech;
18 
19 import android.app.Activity;
20 import android.content.ActivityNotFoundException;
21 import android.content.BroadcastReceiver;
22 import android.content.ComponentName;
23 import android.content.Context;
24 import android.content.Intent;
25 import android.content.pm.PackageManager;
26 import android.content.pm.ResolveInfo;
27 import android.os.Bundle;
28 
29 import java.util.ArrayList;
30 
31 /**
32  * Constants for supporting speech recognition through starting an {@link Intent}
33  */
34 public class RecognizerIntent {
35 
RecognizerIntent()36     private RecognizerIntent() {
37         // Not for instantiating.
38     }
39 
40     /**
41      * Starts an activity that will prompt the user for speech and send it through a
42      * speech recognizer.  The results will be returned via activity results (in
43      * {@link Activity#onActivityResult}, if you start the intent using
44      * {@link Activity#startActivityForResult(Intent, int)}), or forwarded via a PendingIntent
45      * if one is provided.
46      *
47      * <p>Starting this intent with just {@link Activity#startActivity(Intent)} is not supported.
48      * You must either use {@link Activity#startActivityForResult(Intent, int)}, or provide a
49      * PendingIntent, to receive recognition results.
50      *
51      * <p>The implementation of this API is likely to stream audio to remote servers to perform
52      * speech recognition which can use a substantial amount of bandwidth.
53      *
54      * <p>Required extras:
55      * <ul>
56      *   <li>{@link #EXTRA_LANGUAGE_MODEL}
57      * </ul>
58      *
59      * <p>Optional extras:
60      * <ul>
61      *   <li>{@link #EXTRA_PROMPT}
62      *   <li>{@link #EXTRA_LANGUAGE}
63      *   <li>{@link #EXTRA_MAX_RESULTS}
64      *   <li>{@link #EXTRA_RESULTS_PENDINGINTENT}
65      *   <li>{@link #EXTRA_RESULTS_PENDINGINTENT_BUNDLE}
66      * </ul>
67      *
68      * <p> Result extras (returned in the result, not to be specified in the request):
69      * <ul>
70      *   <li>{@link #EXTRA_RESULTS}
71      * </ul>
72      *
73      * <p>NOTE: There may not be any applications installed to handle this action, so you should
74      * make sure to catch {@link ActivityNotFoundException}.
75      */
76     public static final String ACTION_RECOGNIZE_SPEECH = "android.speech.action.RECOGNIZE_SPEECH";
77 
78     /**
79      * Starts an activity that will prompt the user for speech, send it through a
80      * speech recognizer, and either display a web search result or trigger
81      * another type of action based on the user's speech.
82      *
83      * <p>If you want to avoid triggering any type of action besides web search, you can use
84      * the {@link #EXTRA_WEB_SEARCH_ONLY} extra.
85      *
86      * <p>Required extras:
87      * <ul>
88      *   <li>{@link #EXTRA_LANGUAGE_MODEL}
89      * </ul>
90      *
91      * <p>Optional extras:
92      * <ul>
93      *   <li>{@link #EXTRA_PROMPT}
94      *   <li>{@link #EXTRA_LANGUAGE}
95      *   <li>{@link #EXTRA_MAX_RESULTS}
96      *   <li>{@link #EXTRA_PARTIAL_RESULTS}
97      *   <li>{@link #EXTRA_WEB_SEARCH_ONLY}
98      *   <li>{@link #EXTRA_ORIGIN}
99      * </ul>
100      *
101      * <p> Result extras (returned in the result, not to be specified in the request):
102      * <ul>
103      *   <li>{@link #EXTRA_RESULTS}
104      *   <li>{@link #EXTRA_CONFIDENCE_SCORES} (optional)
105      * </ul>
106      *
107      * <p>NOTE: There may not be any applications installed to handle this action, so you should
108      * make sure to catch {@link ActivityNotFoundException}.
109      */
110     public static final String ACTION_WEB_SEARCH = "android.speech.action.WEB_SEARCH";
111 
112     /**
113      * Starts an activity that will prompt the user for speech without requiring the user's
114      * visual attention or touch input. It will send it through a speech recognizer,
115      * and either synthesize speech for a web search result or trigger
116      * another type of action based on the user's speech.
117      *
118      * This activity may be launched while device is locked in a secure mode.
119      * Special care must be taken to ensure that the voice actions that are performed while
120      * hands free cannot compromise the device's security.
121      * The activity should check the value of the {@link #EXTRA_SECURE} extra to determine
122      * whether the device has been securely locked. If so, the activity should either restrict
123      * the set of voice actions that are permitted or require some form of secure
124      * authentication before proceeding.
125      *
126      * To ensure that the activity's user interface is visible while the lock screen is showing,
127      * the activity should set the
128      * {@link android.view.WindowManager.LayoutParams#FLAG_SHOW_WHEN_LOCKED} window flag.
129      * Otherwise the activity's user interface may be hidden by the lock screen. The activity
130      * should take care not to leak private information when the device is securely locked.
131      *
132      * <p>Optional extras:
133      * <ul>
134      *   <li>{@link #EXTRA_SECURE}
135      * </ul>
136      *
137      * <p class="note">
138      * In some cases, a matching Activity may not exist, so ensure you
139      * safeguard against this.
140      */
141     public static final String ACTION_VOICE_SEARCH_HANDS_FREE =
142             "android.speech.action.VOICE_SEARCH_HANDS_FREE";
143 
144     /**
145      * Optional {@link android.os.ParcelFileDescriptor} pointing to an already opened audio
146      * source for the recognizer to use. The caller of the recognizer is responsible for closing
147      * the audio. If this extra is not set or the recognizer does not support this feature, the
148      * recognizer will open the mic for audio and close it when the recognition is finished.
149      *
150      * <p>Along with this extra, please send {@link #EXTRA_AUDIO_SOURCE_CHANNEL_COUNT},
151      * {@link #EXTRA_AUDIO_SOURCE_ENCODING}, and {@link #EXTRA_AUDIO_SOURCE_SAMPLING_RATE}
152      * extras, otherwise the default values of these extras will be used.
153      *
154      * <p>Additionally, {@link #EXTRA_ENABLE_BIASING_DEVICE_CONTEXT} may have no effect when this
155      * extra is set.
156      *
157      * <p>This can also be used as the string value for {@link #EXTRA_SEGMENTED_SESSION} to
158      * enable segmented session mode. The audio must be passed in using this extra. The
159      * recognition session will end when and only when the audio is closed.
160      *
161      * @see #EXTRA_SEGMENTED_SESSION
162      */
163     public static final String EXTRA_AUDIO_SOURCE = "android.speech.extra.AUDIO_SOURCE";
164 
165     /**
166      * Optional integer, to be used with {@link #EXTRA_AUDIO_SOURCE}, to indicate the number of
167      * channels in the audio. The default value is 1.
168      */
169     public static final String EXTRA_AUDIO_SOURCE_CHANNEL_COUNT =
170             "android.speech.extra.AUDIO_SOURCE_CHANNEL_COUNT";
171 
172     /**
173      * Optional integer (from {@link android.media.AudioFormat}), to be used with
174      * {@link #EXTRA_AUDIO_SOURCE}, to indicate the audio encoding. The default value is
175      * {@link android.media.AudioFormat#ENCODING_PCM_16BIT}.
176      */
177     public static final String EXTRA_AUDIO_SOURCE_ENCODING =
178             "android.speech.extra.AUDIO_SOURCE_ENCODING";
179 
180     /**
181      * Optional integer, to be used with {@link #EXTRA_AUDIO_SOURCE}, to indicate the sampling
182      * rate of the audio. The default value is 16000.
183      */
184     public static final String EXTRA_AUDIO_SOURCE_SAMPLING_RATE =
185             "android.speech.extra.AUDIO_SOURCE_SAMPLING_RATE";
186 
187     /**
188      * Optional boolean to enable biasing towards device context. The recognizer will use the
189      * device context to tune the recognition results.
190      *
191      * <p>Depending on the recognizer implementation, this value may have no effect.
192      */
193     public static final String EXTRA_ENABLE_BIASING_DEVICE_CONTEXT =
194             "android.speech.extra.ENABLE_BIASING_DEVICE_CONTEXT";
195 
196     /**
197      * Optional list of strings, towards which the recognizer should bias the recognition results.
198      * These are separate from the device context.
199      */
200     public static final String EXTRA_BIASING_STRINGS = "android.speech.extra.BIASING_STRINGS";
201 
202     /**
203      * Optional string to enable text formatting (e.g. unspoken punctuation (examples: question
204      * mark, comma, period, etc.), capitalization, etc.) and specify the optimization strategy.
205      * If set, the partial and final result texts will be formatted. Each result list will
206      * contain two hypotheses in the order of 1) formatted text 2) raw text.
207      *
208      * <p>Depending on the recognizer implementation, this value may have no effect.
209      *
210      * @see #FORMATTING_OPTIMIZE_QUALITY
211      * @see #FORMATTING_OPTIMIZE_LATENCY
212      */
213     public static final String EXTRA_ENABLE_FORMATTING = "android.speech.extra.ENABLE_FORMATTING";
214 
215     /**
216      * Optimizes formatting quality. This will increase latency but provide the highest
217      * punctuation quality. This is a value to use for {@link #EXTRA_ENABLE_FORMATTING}.
218      *
219      * @see #EXTRA_ENABLE_FORMATTING
220      */
221     public static final String FORMATTING_OPTIMIZE_QUALITY = "quality";
222     /**
223      * Optimizes formatting latency. This will result in a slightly lower quality of punctuation
224      * but can improve the experience for real-time use cases. This is a value to use for
225      * {@link #EXTRA_ENABLE_FORMATTING}.
226      *
227      * @see #EXTRA_ENABLE_FORMATTING
228      */
229     public static final String FORMATTING_OPTIMIZE_LATENCY = "latency";
230 
231     /**
232      * Optional boolean, to be used with {@link #EXTRA_ENABLE_FORMATTING}, to prevent the
233      * recognizer adding punctuation after the last word of the partial results. The default is
234      * false.
235      */
236     public static final String EXTRA_HIDE_PARTIAL_TRAILING_PUNCTUATION =
237             "android.speech.extra.HIDE_PARTIAL_TRAILING_PUNCTUATION";
238 
239     /**
240      * Optional boolean indicating whether the recognizer should mask the offensive words in
241      * recognition results. The Default is true.
242      */
243     public static final String EXTRA_MASK_OFFENSIVE_WORDS =
244             "android.speech.extra.MASK_OFFENSIVE_WORDS";
245 
246     /**
247      * The extra key used in an intent to the speech recognizer for voice search. Not
248      * generally to be used by developers. The system search dialog uses this, for example,
249      * to set a calling package for identification by a voice search API. If this extra
250      * is set by anyone but the system process, it should be overridden by the voice search
251      * implementation.
252      */
253     public static final String EXTRA_CALLING_PACKAGE = "calling_package";
254 
255     /**
256      * The extra key used in an intent which is providing an already opened audio source for the
257      * RecognitionService to use. Data should be a URI to an audio resource.
258      *
259      * <p>Depending on the recognizer implementation, this value may have no effect.
260      *
261      * @deprecated Replaced with {@link #EXTRA_AUDIO_SOURCE}
262      */
263     @Deprecated
264     public static final String EXTRA_AUDIO_INJECT_SOURCE =
265             "android.speech.extra.AUDIO_INJECT_SOURCE";
266 
267     /**
268      * Optional boolean to indicate that a "hands free" voice search was performed while the device
269      * was in a secure mode. An example of secure mode is when the device's screen lock is active,
270      * and it requires some form of authentication to be unlocked.
271      *
272      * When the device is securely locked, the voice search activity should either restrict
273      * the set of voice actions that are permitted, or require some form of secure authentication
274      * before proceeding.
275      */
276     public static final String EXTRA_SECURE = "android.speech.extras.EXTRA_SECURE";
277 
278     /**
279      * Optional integer to indicate the minimum length of the recognition session. The recognizer
280      * will not stop recognizing speech before this amount of time.
281      *
282      * <p>Note that it is extremely rare you'd want to specify this value in an intent.
283      * Generally, it should be specified only when it is also used as the value for
284      * {@link #EXTRA_SEGMENTED_SESSION} to enable segmented session mode. Note also that certain
285      * values may cause undesired or unexpected results - use judiciously!
286      *
287      * <p>Depending on the recognizer implementation, these values may have no effect.
288      */
289     public static final String EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS =
290             "android.speech.extras.SPEECH_INPUT_MINIMUM_LENGTH_MILLIS";
291 
292     /**
293      * The amount of time that it should take after the recognizer stops hearing speech to
294      * consider the input complete hence end the recognition session.
295      *
296      * <p>Note that it is extremely rare you'd want to specify this value in an intent.
297      * Generally, it should be specified only when it is also used as the value for
298      * {@link #EXTRA_SEGMENTED_SESSION} to enable segmented session mode. Note also that certain
299      * values may cause undesired or unexpected results - use judiciously!
300      *
301      * <p>Depending on the recognizer implementation, these values may have no effect.
302      */
303     public static final String EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS =
304             "android.speech.extras.SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS";
305 
306     /**
307      * The amount of time that it should take after we stop hearing speech to consider the input
308      * possibly complete. This is used to prevent the endpointer cutting off during very short
309      * mid-speech pauses.
310      *
311      * Note that it is extremely rare you'd want to specify this value in an intent. If
312      * you don't have a very good reason to change these, you should leave them as they are. Note
313      * also that certain values may cause undesired or unexpected results - use judiciously!
314      * Additionally, depending on the recognizer implementation, these values may have no effect.
315      */
316     public static final String EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS =
317             "android.speech.extras.SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS";
318 
319     /**
320      * Informs the recognizer which speech model to prefer when performing
321      * {@link #ACTION_RECOGNIZE_SPEECH}. The recognizer uses this
322      * information to fine tune the results. This extra is required. Activities implementing
323      * {@link #ACTION_RECOGNIZE_SPEECH} may interpret the values as they see fit.
324      *
325      *  @see #LANGUAGE_MODEL_FREE_FORM
326      *  @see #LANGUAGE_MODEL_WEB_SEARCH
327      */
328     public static final String EXTRA_LANGUAGE_MODEL = "android.speech.extra.LANGUAGE_MODEL";
329 
330     /**
331      * Use a language model based on free-form speech recognition.  This is a value to use for
332      * {@link #EXTRA_LANGUAGE_MODEL}.
333      * @see #EXTRA_LANGUAGE_MODEL
334      */
335     public static final String LANGUAGE_MODEL_FREE_FORM = "free_form";
336     /**
337      * Use a language model based on web search terms.  This is a value to use for
338      * {@link #EXTRA_LANGUAGE_MODEL}.
339      * @see #EXTRA_LANGUAGE_MODEL
340      */
341     public static final String LANGUAGE_MODEL_WEB_SEARCH = "web_search";
342 
343     /** Optional text prompt to show to the user when asking them to speak. */
344     public static final String EXTRA_PROMPT = "android.speech.extra.PROMPT";
345 
346     /**
347      * Optional IETF language tag (as defined by BCP 47), for example "en-US". This tag informs the
348      * recognizer to perform speech recognition in a language different than the one set in the
349      * {@link java.util.Locale#getDefault()}.
350      */
351     public static final String EXTRA_LANGUAGE = "android.speech.extra.LANGUAGE";
352 
353     /**
354      * Optional value which can be used to indicate the referer url of a page in which
355      * speech was requested. For example, a web browser may choose to provide this for
356      * uses of speech on a given page.
357      */
358     public static final String EXTRA_ORIGIN = "android.speech.extra.ORIGIN";
359 
360     /**
361      * Optional limit on the maximum number of results to return. If omitted the recognizer
362      * will choose how many results to return. Must be an integer.
363      */
364     public static final String EXTRA_MAX_RESULTS = "android.speech.extra.MAX_RESULTS";
365 
366     /**
367      * Optional boolean, to be used with {@link #ACTION_WEB_SEARCH}, to indicate whether to
368      * only fire web searches in response to a user's speech. The default is false, meaning
369      * that other types of actions can be taken based on the user's speech.
370      */
371     public static final String EXTRA_WEB_SEARCH_ONLY = "android.speech.extra.WEB_SEARCH_ONLY";
372 
373     /**
374      * Optional boolean to indicate whether partial results should be returned by the recognizer
375      * as the user speaks (default is false).  The server may ignore a request for partial
376      * results in some or all cases.
377      */
378     public static final String EXTRA_PARTIAL_RESULTS = "android.speech.extra.PARTIAL_RESULTS";
379 
380     /**
381      * When the intent is {@link #ACTION_RECOGNIZE_SPEECH}, the speech input activity will
382      * return results to you via the activity results mechanism.  Alternatively, if you use this
383      * extra to supply a PendingIntent, the results will be added to its bundle and the
384      * PendingIntent will be sent to its target.
385      */
386     public static final String EXTRA_RESULTS_PENDINGINTENT =
387             "android.speech.extra.RESULTS_PENDINGINTENT";
388 
389     /**
390      * If you use {@link #EXTRA_RESULTS_PENDINGINTENT} to supply a forwarding intent, you can
391      * also use this extra to supply additional extras for the final intent.  The search results
392      * will be added to this bundle, and the combined bundle will be sent to the target.
393      */
394     public static final String EXTRA_RESULTS_PENDINGINTENT_BUNDLE =
395             "android.speech.extra.RESULTS_PENDINGINTENT_BUNDLE";
396 
397     /** Result code returned when no matches are found for the given speech */
398     public static final int RESULT_NO_MATCH = Activity.RESULT_FIRST_USER;
399     /** Result code returned when there is a generic client error */
400     public static final int RESULT_CLIENT_ERROR = Activity.RESULT_FIRST_USER + 1;
401     /** Result code returned when the recognition server returns an error */
402     public static final int RESULT_SERVER_ERROR = Activity.RESULT_FIRST_USER + 2;
403     /** Result code returned when a network error was encountered */
404     public static final int RESULT_NETWORK_ERROR = Activity.RESULT_FIRST_USER + 3;
405     /** Result code returned when an audio error was encountered */
406     public static final int RESULT_AUDIO_ERROR = Activity.RESULT_FIRST_USER + 4;
407 
408     /**
409      * An ArrayList&lt;String&gt; of the recognition results when performing
410      * {@link #ACTION_RECOGNIZE_SPEECH}. Generally this list should be ordered in
411      * descending order of speech recognizer confidence. (See {@link #EXTRA_CONFIDENCE_SCORES}).
412      * Returned in the results; not to be specified in the recognition request. Only present
413      * when {@link Activity#RESULT_OK} is returned in an activity result. In a PendingIntent,
414      * the lack of this extra indicates failure.
415      */
416     public static final String EXTRA_RESULTS = "android.speech.extra.RESULTS";
417 
418     /**
419      * A float array of confidence scores of the recognition results when performing
420      * {@link #ACTION_RECOGNIZE_SPEECH}. The array should be the same size as the ArrayList
421      * returned in {@link #EXTRA_RESULTS}, and should contain values ranging from 0.0 to 1.0,
422      * or -1 to represent an unavailable confidence score.
423      * <p>
424      * Confidence values close to 1.0 indicate high confidence (the speech recognizer is
425      * confident that the recognition result is correct), while values close to 0.0 indicate
426      * low confidence.
427      * <p>
428      * Returned in the results; not to be specified in the recognition request. This extra is
429      * optional and might not be provided. Only present when {@link Activity#RESULT_OK} is
430      * returned in an activity result.
431      */
432     public static final String EXTRA_CONFIDENCE_SCORES = "android.speech.extra.CONFIDENCE_SCORES";
433 
434     /**
435      * Returns the broadcast intent to fire with
436      * {@link Context#sendOrderedBroadcast(Intent, String, BroadcastReceiver, android.os.Handler, int, String, Bundle)}
437      * to receive details from the package that implements voice search.
438      * <p>
439      * This is based on the value specified by the voice search {@link Activity} in
440      * {@link #DETAILS_META_DATA}, and if this is not specified, will return null. Also if there
441      * is no chosen default to resolve for {@link #ACTION_WEB_SEARCH}, this will return null.
442      * <p>
443      * If an intent is returned and is fired, a {@link Bundle} of extras will be returned to the
444      * provided result receiver, and should ideally contain values for
445      * {@link #EXTRA_LANGUAGE_PREFERENCE} and {@link #EXTRA_SUPPORTED_LANGUAGES}.
446      * <p>
447      * (Whether these are actually provided is up to the particular implementation. It is
448      * recommended that {@link Activity}s implementing {@link #ACTION_WEB_SEARCH} provide this
449      * information, but it is not required.)
450      *
451      * @param context a context object
452      * @return the broadcast intent to fire or null if not available
453      */
getVoiceDetailsIntent(Context context)454     public static final Intent getVoiceDetailsIntent(Context context) {
455         Intent voiceSearchIntent = new Intent(ACTION_WEB_SEARCH);
456         ResolveInfo ri = context.getPackageManager().resolveActivity(
457                 voiceSearchIntent, PackageManager.GET_META_DATA);
458         if (ri == null || ri.activityInfo == null || ri.activityInfo.metaData == null) return null;
459 
460         String className = ri.activityInfo.metaData.getString(DETAILS_META_DATA);
461         if (className == null) return null;
462 
463         Intent detailsIntent = new Intent(ACTION_GET_LANGUAGE_DETAILS);
464         detailsIntent.setComponent(new ComponentName(ri.activityInfo.packageName, className));
465         return detailsIntent;
466     }
467 
468     /**
469      * Meta-data name under which an {@link Activity} implementing {@link #ACTION_WEB_SEARCH} can
470      * use to expose the class name of a {@link BroadcastReceiver} which can respond to request for
471      * more information, from any of the broadcast intents specified in this class.
472      * <p>
473      * Broadcast intents can be directed to the class name specified in the meta-data by creating
474      * an {@link Intent}, setting the component with
475      * {@link Intent#setComponent(android.content.ComponentName)}, and using
476      * {@link Context#sendOrderedBroadcast(Intent, String, BroadcastReceiver, android.os.Handler, int, String, android.os.Bundle)}
477      * with another {@link BroadcastReceiver} which can receive the results.
478      * <p>
479      * The {@link #getVoiceDetailsIntent(Context)} method is provided as a convenience to create
480      * a broadcast intent based on the value of this meta-data, if available.
481      * <p>
482      * This is optional and not all {@link Activity}s which implement {@link #ACTION_WEB_SEARCH}
483      * are required to implement this. Thus retrieving this meta-data may be null.
484      */
485     public static final String DETAILS_META_DATA = "android.speech.DETAILS";
486 
487     /**
488      * A broadcast intent which can be fired to the {@link BroadcastReceiver} component specified
489      * in the meta-data defined in the {@link #DETAILS_META_DATA} meta-data of an
490      * {@link Activity} satisfying {@link #ACTION_WEB_SEARCH}.
491      * <p>
492      * When fired with
493      * {@link Context#sendOrderedBroadcast(Intent, String, BroadcastReceiver, android.os.Handler, int, String, android.os.Bundle)},
494      * a {@link Bundle} of extras will be returned to the provided result receiver, and should
495      * ideally contain values for {@link #EXTRA_LANGUAGE_PREFERENCE} and
496      * {@link #EXTRA_SUPPORTED_LANGUAGES}.
497      * <p>
498      * (Whether these are actually provided is up to the particular implementation. It is
499      * recommended that {@link Activity}s implementing {@link #ACTION_WEB_SEARCH} provide this
500      * information, but it is not required.)
501      */
502     public static final String ACTION_GET_LANGUAGE_DETAILS =
503             "android.speech.action.GET_LANGUAGE_DETAILS";
504 
505     /**
506      * Specify this boolean extra in a broadcast of {@link #ACTION_GET_LANGUAGE_DETAILS} to
507      * indicate that only the current language preference is needed in the response. This
508      * avoids any additional computation if all you need is {@link #EXTRA_LANGUAGE_PREFERENCE}
509      * in the response.
510      */
511     public static final String EXTRA_ONLY_RETURN_LANGUAGE_PREFERENCE =
512             "android.speech.extra.ONLY_RETURN_LANGUAGE_PREFERENCE";
513 
514     /**
515      * The key to the extra in the {@link Bundle} returned by {@link #ACTION_GET_LANGUAGE_DETAILS}
516      * which is a {@link String} that represents the current language preference this user has
517      * specified - a locale string like "en-US".
518      */
519     public static final String EXTRA_LANGUAGE_PREFERENCE =
520             "android.speech.extra.LANGUAGE_PREFERENCE";
521 
522     /**
523      * The key to the extra in the {@link Bundle} returned by {@link #ACTION_GET_LANGUAGE_DETAILS}
524      * which is an {@link ArrayList} of {@link String}s that represents the languages supported by
525      * this implementation of voice recognition - a list of strings like "en-US", "cmn-Hans-CN",
526      * etc.
527      */
528     public static final String EXTRA_SUPPORTED_LANGUAGES =
529             "android.speech.extra.SUPPORTED_LANGUAGES";
530 
531     /**
532      * Optional boolean, to be used with {@link #ACTION_RECOGNIZE_SPEECH},
533      * {@link #ACTION_VOICE_SEARCH_HANDS_FREE}, {@link #ACTION_WEB_SEARCH} to indicate whether to
534      * only use an offline speech recognition engine. The default is false, meaning that either
535      * network or offline recognition engines may be used.
536      *
537      * <p>Depending on the recognizer implementation, these values may have
538      * no effect.</p>
539      *
540      */
541     public static final String EXTRA_PREFER_OFFLINE = "android.speech.extra.PREFER_OFFLINE";
542 
543     /**
544      * Optional string to enable segmented session mode of the specified type, which can be
545      * {@link #EXTRA_AUDIO_SOURCE}, {@link #EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS} or
546      * {@link #EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS}. When segmented session mode is
547      * supported by the recognizer implementation and this extra is set, it will return the
548      * recognition results in segments via {@link RecognitionListener#onSegmentResults(Bundle)}
549      * and terminate the session with {@link RecognitionListener#onEndOfSegmentedSession()}.
550      *
551      * <p>When setting this extra, make sure the extra used as the string value here is also set
552      * in the same intent with proper value.
553      *
554      * <p>Depending on the recognizer implementation, this value may have no effect.
555      *
556      * @see #EXTRA_AUDIO_SOURCE
557      * @see #EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS
558      * @see #EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS
559      */
560     public static final String EXTRA_SEGMENTED_SESSION = "android.speech.extra.SEGMENTED_SESSION";
561 }
562