/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.android.inputmethod.deprecated.voice;

import com.android.inputmethod.latin.EditingUtils;
import com.android.inputmethod.latin.LatinImeLogger;
import com.android.inputmethod.latin.R;
import com.android.inputmethod.latin.StaticInnerHandlerWrapper;

import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.res.Configuration;
import android.os.Build;
import android.os.Bundle;
import android.os.Message;
import android.os.Parcelable;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.inputmethod.InputConnection;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;

/**
 * Speech recognition input, including both user interface and a background
 * process to stream audio to the network recognizer. This class supplies a
 * View (getView()), which it updates as recognition occurs. The user of this
 * class is responsible for making the view visible to the user, as well as
 * handling various events returned through UiListener.
 */
public class VoiceInput implements OnClickListener {
    private static final String TAG = "VoiceInput";
    private static final String EXTRA_RECOGNITION_CONTEXT =
            "android.speech.extras.RECOGNITION_CONTEXT";
    private static final String EXTRA_CALLING_PACKAGE = "calling_package";
    private static final String EXTRA_ALTERNATES = "android.speech.extra.ALTERNATES";
    private static final int MAX_ALT_LIST_LENGTH = 6;
    private static boolean DBG = LatinImeLogger.sDBG;

    private static final String DEFAULT_RECOMMENDED_PACKAGES =
            "com.android.mms " +
            "com.google.android.gm " +
            "com.google.android.talk " +
            "com.google.android.apps.googlevoice " +
            "com.android.email " +
            "com.android.browser ";

    // WARNING! Before enabling this, fix the problem with calling getExtractedText() in
    // landscape view. It causes Extracted text updates to be rejected due to a token mismatch
    public static boolean ENABLE_WORD_CORRECTIONS = true;

    // Dummy word suggestion which means "delete current word"
    public static final String DELETE_SYMBOL = " \u00D7 ";  // times symbol

    private Whitelist mRecommendedList;
    private Whitelist mBlacklist;

    private VoiceInputLogger mLogger;

    // Names of a few extras defined in VoiceSearch's RecognitionController
    // Note, the version of voicesearch that shipped in Froyo returns the raw
    // RecognitionClientAlternates protocol buffer under the key "alternates",
    // so a VS market update must be installed on Froyo devices in order to see
    // alternatives.
    private static final String ALTERNATES_BUNDLE = "alternates_bundle";

    //  This is copied from the VoiceSearch app.
    @SuppressWarnings("unused")
    private static final class AlternatesBundleKeys {
        public static final String ALTERNATES = "alternates";
        public static final String CONFIDENCE = "confidence";
        public static final String LENGTH = "length";
        public static final String MAX_SPAN_LENGTH = "max_span_length";
        public static final String SPANS = "spans";
        public static final String SPAN_KEY_DELIMITER = ":";
        public static final String START = "start";
        public static final String TEXT = "text";
    }

    // Names of a few intent extras defined in VoiceSearch's RecognitionService.
    // These let us tweak the endpointer parameters.
    private static final String EXTRA_SPEECH_MINIMUM_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_MINIMUM_LENGTH_MILLIS";
    private static final String EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS";
    private static final String EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS";

    // The usual endpointer default value for input complete silence length is 0.5 seconds,
    // but that's used for things like voice search. For dictation-like voice input like this,
    // we go with a more liberal value of 1 second. This value will only be used if a value
    // is not provided from Gservices.
    private static final String INPUT_COMPLETE_SILENCE_LENGTH_DEFAULT_VALUE_MILLIS = "1000";

    // Used to record part of that state for logging purposes.
    public static final int DEFAULT = 0;
    public static final int LISTENING = 1;
    public static final int WORKING = 2;
    public static final int ERROR = 3;

    private int mAfterVoiceInputDeleteCount = 0;
    private int mAfterVoiceInputInsertCount = 0;
    private int mAfterVoiceInputInsertPunctuationCount = 0;
    private int mAfterVoiceInputCursorPos = 0;
    private int mAfterVoiceInputSelectionSpan = 0;

    private int mState = DEFAULT;

    private final static int MSG_RESET = 1;

    private final UIHandler mHandler = new UIHandler(this);

    private static class UIHandler extends StaticInnerHandlerWrapper<VoiceInput> {
        public UIHandler(VoiceInput outerInstance) {
            super(outerInstance);
        }

        @Override
        public void handleMessage(Message msg) {
            if (msg.what == MSG_RESET) {
                final VoiceInput voiceInput = getOuterInstance();
                voiceInput.mState = DEFAULT;
                voiceInput.mRecognitionView.finish();
                voiceInput.mUiListener.onCancelVoice();
            }
        }
    };

    /**
     * Events relating to the recognition UI. You must implement these.
     */
    public interface UiListener {

        /**
         * @param recognitionResults a set of transcripts for what the user
         *   spoke, sorted by likelihood.
         */
        public void onVoiceResults(
            List<String> recognitionResults,
            Map<String, List<CharSequence>> alternatives);

        /**
         * Called when the user cancels speech recognition.
         */
        public void onCancelVoice();
    }
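
    // Illustrative usage sketch: a client IME would typically implement UiListener,
    // add the view from getView() to its own layout, and drive the recognition
    // lifecycle roughly as below. The names imeContext and fieldContext are
    // hypothetical placeholders, not APIs defined in this class.
    //
    //     VoiceInput voiceInput = new VoiceInput(imeContext, new UiListener() {
    //         @Override
    //         public void onVoiceResults(List<String> results,
    //                 Map<String, List<CharSequence>> alternatives) {
    //             // Commit the top transcript (results.get(0)) through the InputConnection
    //             // and keep the alternatives for later word-level corrections.
    //         }
    //         @Override
    //         public void onCancelVoice() {
    //             // Switch back to the keyboard view.
    //         }
    //     });
    //     // Make voiceInput.getView() visible to the user, then:
    //     voiceInput.startListening(fieldContext, false /* not started by swipe */);
    //     // Call voiceInput.destroy() when the IME is shut down.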

    private SpeechRecognizer mSpeechRecognizer;
    private RecognitionListener mRecognitionListener;
    private RecognitionView mRecognitionView;
    private UiListener mUiListener;
    private Context mContext;

    /**
     * @param context the service or activity in which we're running.
     * @param uiHandler object to receive events from VoiceInput.
     */
    public VoiceInput(Context context, UiListener uiHandler) {
        mLogger = VoiceInputLogger.getLogger(context);
        mRecognitionListener = new ImeRecognitionListener();
        mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(context);
        mSpeechRecognizer.setRecognitionListener(mRecognitionListener);
        mUiListener = uiHandler;
        mContext = context;
        newView();

        String recommendedPackages = SettingsUtil.getSettingsString(
                context.getContentResolver(),
                SettingsUtil.LATIN_IME_VOICE_INPUT_RECOMMENDED_PACKAGES,
                DEFAULT_RECOMMENDED_PACKAGES);

        mRecommendedList = new Whitelist();
        for (String recommendedPackage : recommendedPackages.split("\\s+")) {
            mRecommendedList.addApp(recommendedPackage);
        }

        mBlacklist = new Whitelist();
        mBlacklist.addApp("com.google.android.setupwizard");
    }

    public void setCursorPos(int pos) {
        mAfterVoiceInputCursorPos = pos;
    }

    public int getCursorPos() {
        return mAfterVoiceInputCursorPos;
    }

    public void setSelectionSpan(int span) {
        mAfterVoiceInputSelectionSpan = span;
    }

    public int getSelectionSpan() {
        return mAfterVoiceInputSelectionSpan;
    }

    public void incrementTextModificationDeleteCount(int count){
        mAfterVoiceInputDeleteCount += count;
        // Send up intents for other text modification types
        if (mAfterVoiceInputInsertCount > 0) {
            logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
            mAfterVoiceInputInsertCount = 0;
        }
        if (mAfterVoiceInputInsertPunctuationCount > 0) {
            logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
            mAfterVoiceInputInsertPunctuationCount = 0;
        }

    }

    public void incrementTextModificationInsertCount(int count){
        mAfterVoiceInputInsertCount += count;
        if (mAfterVoiceInputSelectionSpan > 0) {
            // If text was highlighted before inserting the char, count this as
            // a delete.
            mAfterVoiceInputDeleteCount += mAfterVoiceInputSelectionSpan;
        }
        // Send up intents for other text modification types
        if (mAfterVoiceInputDeleteCount > 0) {
            logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
            mAfterVoiceInputDeleteCount = 0;
        }
        if (mAfterVoiceInputInsertPunctuationCount > 0) {
            logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
            mAfterVoiceInputInsertPunctuationCount = 0;
        }
    }

    public void incrementTextModificationInsertPunctuationCount(int count){
        mAfterVoiceInputInsertPunctuationCount += count;
        if (mAfterVoiceInputSelectionSpan > 0) {
            // If text was highlighted before inserting the char, count this as
            // a delete.
            mAfterVoiceInputDeleteCount += mAfterVoiceInputSelectionSpan;
        }
        // Send up intents for aggregated non-punctuation insertions
        if (mAfterVoiceInputDeleteCount > 0) {
            logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
            mAfterVoiceInputDeleteCount = 0;
        }
        if (mAfterVoiceInputInsertCount > 0) {
            logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
            mAfterVoiceInputInsertCount = 0;
        }
    }

    public void flushAllTextModificationCounters() {
        if (mAfterVoiceInputInsertCount > 0) {
            logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
            mAfterVoiceInputInsertCount = 0;
        }
        if (mAfterVoiceInputDeleteCount > 0) {
            logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
            mAfterVoiceInputDeleteCount = 0;
        }
        if (mAfterVoiceInputInsertPunctuationCount > 0) {
            logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
            mAfterVoiceInputInsertPunctuationCount = 0;
        }
    }

    /**
     * The configuration of the IME changed and may have caused the views to be laid out
     * again. Restore the state of the recognition view.
     */
    public void onConfigurationChanged(Configuration configuration) {
        mRecognitionView.restoreState();
        mRecognitionView.getView().dispatchConfigurationChanged(configuration);
    }

    /**
     * @return true if field is blacklisted for voice
     */
    public boolean isBlacklistedField(FieldContext context) {
        return mBlacklist.matches(context);
    }

    /**
     * Used to decide whether to show voice input hints for this field, etc.
     *
     * @return true if field is recommended for voice
     */
    public boolean isRecommendedField(FieldContext context) {
        return mRecommendedList.matches(context);
    }

    /**
     * Start listening for speech from the user. This will grab the microphone
     * and start updating the view provided by getView(). It is the caller's
     * responsibility to ensure that the view is visible to the user at this stage.
     *
     * @param context the same FieldContext supplied to voiceIsEnabled()
     * @param swipe whether this voice input was started by swipe, for logging purposes
     */
    public void startListening(FieldContext context, boolean swipe) {
        if (DBG) {
            Log.d(TAG, "startListening: " + context);
        }

        if (mState != DEFAULT) {
            Log.w(TAG, "startListening in the wrong status " + mState);
        }

        // If everything works ok, the voice input should already be in the correct state. As this
        // class can be called by third-party code, we call reset just to be on the safe side.
        reset();

        Locale locale = Locale.getDefault();
        String localeString = locale.getLanguage() + "-" + locale.getCountry();

        mLogger.start(localeString, swipe);

        mState = LISTENING;

        mRecognitionView.showInitializing();
        startListeningAfterInitialization(context);
    }

    /**
     * Called only after the recognition manager's initialization has completed
     *
     * @param context context with which {@link #startListening(FieldContext, boolean)} was executed
     */
    private void startListeningAfterInitialization(FieldContext context) {
        Intent intent = makeIntent();
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, "");
        intent.putExtra(EXTRA_RECOGNITION_CONTEXT, context.getBundle());
        intent.putExtra(EXTRA_CALLING_PACKAGE, "VoiceIME");
        intent.putExtra(EXTRA_ALTERNATES, true);
        intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS,
                SettingsUtil.getSettingsInt(
                        mContext.getContentResolver(),
                        SettingsUtil.LATIN_IME_MAX_VOICE_RESULTS,
                        1));
        // Get endpointer params from Gservices.
        // TODO: Consider caching these values for improved performance on slower devices.
        final ContentResolver cr = mContext.getContentResolver();
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.LATIN_IME_SPEECH_MINIMUM_LENGTH_MILLIS,
                EXTRA_SPEECH_MINIMUM_LENGTH_MILLIS,
                null  /* rely on endpointer default */);
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.LATIN_IME_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
                EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
                INPUT_COMPLETE_SILENCE_LENGTH_DEFAULT_VALUE_MILLIS
                /* our default value is different from the endpointer's */);
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.
                        LATIN_IME_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
                EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
                null  /* rely on endpointer default */);

        mSpeechRecognizer.startListening(intent);
    }

    /**
     * Gets the value of the provided Gservices key, attempts to parse it into a long,
     * and if successful, puts the long value as an extra in the provided intent.
     */
    private void putEndpointerExtra(ContentResolver cr, Intent i,
            String gservicesKey, String intentExtraKey, String defaultValue) {
        long l = -1;
        String s = SettingsUtil.getSettingsString(cr, gservicesKey, defaultValue);
        if (s != null) {
            try {
                l = Long.valueOf(s);
            } catch (NumberFormatException e) {
                Log.e(TAG, "could not parse value for " + gservicesKey + ": " + s);
            }
        }

        if (l != -1) i.putExtra(intentExtraKey, l);
    }

    public void destroy() {
        mSpeechRecognizer.destroy();
    }

    /**
     * Creates a new instance of the view that is returned by {@link #getView()}.
     * Clients should use this when a previously returned view is stuck in a
     * layout that is being thrown away and a new one is needed to show to the
     * user.
     */
    public void newView() {
        mRecognitionView = new RecognitionView(mContext, this);
    }

    /**
     * @return a view that shows the recognition flow--e.g., "Speak now" and
     * "working" dialogs.
     */
    public View getView() {
        return mRecognitionView.getView();
    }

    /**
     * Handle the cancel button.
     */
    @Override
    public void onClick(View view) {
        switch(view.getId()) {
            case R.id.button:
                cancel();
                break;
        }
    }

    public void logTextModifiedByTypingInsertion(int length) {
        mLogger.textModifiedByTypingInsertion(length);
    }

    public void logTextModifiedByTypingInsertionPunctuation(int length) {
        mLogger.textModifiedByTypingInsertionPunctuation(length);
    }

    public void logTextModifiedByTypingDeletion(int length) {
        mLogger.textModifiedByTypingDeletion(length);
    }

    public void logTextModifiedByChooseSuggestion(String suggestion, int index,
                                                  String wordSeparators, InputConnection ic) {
        String wordToBeReplaced = EditingUtils.getWordAtCursor(ic, wordSeparators);
        // If we enable phrase-based alternatives, only send up the first word
        // in suggestion and wordToBeReplaced.
        mLogger.textModifiedByChooseSuggestion(suggestion.length(), wordToBeReplaced.length(),
                                               index, wordToBeReplaced, suggestion);
    }

    public void logKeyboardWarningDialogShown() {
        mLogger.keyboardWarningDialogShown();
    }

    public void logKeyboardWarningDialogDismissed() {
        mLogger.keyboardWarningDialogDismissed();
    }

    public void logKeyboardWarningDialogOk() {
        mLogger.keyboardWarningDialogOk();
    }

    public void logKeyboardWarningDialogCancel() {
        mLogger.keyboardWarningDialogCancel();
    }

    public void logSwipeHintDisplayed() {
        mLogger.swipeHintDisplayed();
    }

    public void logPunctuationHintDisplayed() {
        mLogger.punctuationHintDisplayed();
    }

    public void logVoiceInputDelivered(int length) {
        mLogger.voiceInputDelivered(length);
    }

    public void logInputEnded() {
        mLogger.inputEnded();
    }

    public void flushLogs() {
        mLogger.flush();
    }

    private static Intent makeIntent() {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);

        // On Cupcake, use VoiceIMEHelper since VoiceSearch doesn't support.
        // On Donut, always use VoiceSearch, since VoiceIMEHelper and
        // VoiceSearch may conflict.
        if (Build.VERSION.RELEASE.equals("1.5")) {
            intent = intent.setClassName(
              "com.google.android.voiceservice",
              "com.google.android.voiceservice.IMERecognitionService");
        } else {
            intent = intent.setClassName(
              "com.google.android.voicesearch",
              "com.google.android.voicesearch.RecognitionService");
        }

        return intent;
    }

    /**
     * Reset the current voice recognition.
     */
    public void reset() {
        if (mState != DEFAULT) {
            mState = DEFAULT;

            // Remove all pending tasks (e.g., timers to cancel voice input)
            mHandler.removeMessages(MSG_RESET);

            mSpeechRecognizer.cancel();
            mRecognitionView.finish();
        }
    }

    /**
     * Cancel in-progress speech recognition.
     */
    public void cancel() {
        switch (mState) {
        case LISTENING:
            mLogger.cancelDuringListening();
            break;
        case WORKING:
            mLogger.cancelDuringWorking();
            break;
        case ERROR:
            mLogger.cancelDuringError();
            break;
        }

        reset();
        mUiListener.onCancelVoice();
    }

    private int getErrorStringId(int errorType, boolean endpointed) {
        switch (errorType) {
            // We use CLIENT_ERROR to signify that voice search is not available on the device.
            case SpeechRecognizer.ERROR_CLIENT:
                return R.string.voice_not_installed;
            case SpeechRecognizer.ERROR_NETWORK:
                return R.string.voice_network_error;
            case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
                return endpointed ?
                        R.string.voice_network_error : R.string.voice_too_much_speech;
            case SpeechRecognizer.ERROR_AUDIO:
                return R.string.voice_audio_error;
            case SpeechRecognizer.ERROR_SERVER:
                return R.string.voice_server_error;
            case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
                return R.string.voice_speech_timeout;
            case SpeechRecognizer.ERROR_NO_MATCH:
                return R.string.voice_no_match;
            default: return R.string.voice_error;
        }
    }

    private void onError(int errorType, boolean endpointed) {
        Log.i(TAG, "error " + errorType);
        mLogger.error(errorType);
        onError(mContext.getString(getErrorStringId(errorType, endpointed)));
    }

    private void onError(String error) {
        mState = ERROR;
        mRecognitionView.showError(error);
        // Wait a couple seconds and then automatically dismiss message.
        mHandler.sendMessageDelayed(Message.obtain(mHandler, MSG_RESET), 2000);
    }

    private class ImeRecognitionListener implements RecognitionListener {
        // Waveform data
        final ByteArrayOutputStream mWaveBuffer = new ByteArrayOutputStream();
        int mSpeechStart;
        private boolean mEndpointed = false;

        @Override
        public void onReadyForSpeech(Bundle noiseParams) {
            mRecognitionView.showListening();
        }

        @Override
        public void onBeginningOfSpeech() {
            mEndpointed = false;
            mSpeechStart = mWaveBuffer.size();
        }

        @Override
        public void onRmsChanged(float rmsdB) {
            mRecognitionView.updateVoiceMeter(rmsdB);
        }

        @Override
        public void onBufferReceived(byte[] buf) {
            try {
                mWaveBuffer.write(buf);
            } catch (IOException e) {
                // ignore.
            }
        }

        @Override
        public void onEndOfSpeech() {
            mEndpointed = true;
            mState = WORKING;
            mRecognitionView.showWorking(mWaveBuffer, mSpeechStart, mWaveBuffer.size());
        }

        @Override
        public void onError(int errorType) {
            mState = ERROR;
            VoiceInput.this.onError(errorType, mEndpointed);
        }

        @Override
        public void onResults(Bundle resultsBundle) {
            List<String> results = resultsBundle
                    .getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            // A VS Market update is needed for Froyo IME clients to access the alternatesBundle.
            // TODO: verify this.
            Bundle alternatesBundle = resultsBundle.getBundle(ALTERNATES_BUNDLE);
            mState = DEFAULT;

            final Map<String, List<CharSequence>> alternatives =
                new HashMap<String, List<CharSequence>>();

            if (ENABLE_WORD_CORRECTIONS && alternatesBundle != null && results.size() > 0) {
                // Use the top recognition result to map each alternative's start:length to a word.
                String[] words = results.get(0).split(" ");
                Bundle spansBundle = alternatesBundle.getBundle(AlternatesBundleKeys.SPANS);
                for (String key : spansBundle.keySet()) {
                    // Get the word to which these alternates correspond.
                    Bundle spanBundle = spansBundle.getBundle(key);
                    int start = spanBundle.getInt(AlternatesBundleKeys.START);
                    int length = spanBundle.getInt(AlternatesBundleKeys.LENGTH);
                    // Only keep single-word based alternatives.
                    if (length == 1 && start < words.length) {
                        // Get the alternatives associated with the span.
                        // If a word appears twice in a recognition result,
                        // concatenate the alternatives for the word.
                        List<CharSequence> altList = alternatives.get(words[start]);
                        if (altList == null) {
                            altList = new ArrayList<CharSequence>();
                            alternatives.put(words[start], altList);
                        }
                        Parcelable[] alternatesArr = spanBundle
                            .getParcelableArray(AlternatesBundleKeys.ALTERNATES);
                        for (int j = 0; j < alternatesArr.length &&
                                 altList.size() < MAX_ALT_LIST_LENGTH; j++) {
                            Bundle alternateBundle = (Bundle) alternatesArr[j];
                            String alternate = alternateBundle.getString(AlternatesBundleKeys.TEXT);
                            // Don't allow duplicates in the alternates list.
                            if (!altList.contains(alternate)) {
                                altList.add(alternate);
                            }
                        }
                    }
                }
            }

            if (results.size() > 5) {
                results = results.subList(0, 5);
            }
            mUiListener.onVoiceResults(results, alternatives);
            mRecognitionView.finish();
        }

        @Override
        public void onPartialResults(final Bundle partialResults) {
            // currently - do nothing
        }

        @Override
        public void onEvent(int eventType, Bundle params) {
            // do nothing - reserved for events that might be added in the future
        }
    }
}