1 /*
2  * Copyright (C) 2009 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License"); you may not
5  * use this file except in compliance with the License. You may obtain a copy of
6  * the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13  * License for the specific language governing permissions and limitations under
14  * the License.
15  */
16 package android.speech.tts;
17 
18 import android.annotation.IntDef;
19 import android.annotation.NonNull;
20 import android.annotation.Nullable;
21 import android.annotation.RawRes;
22 import android.annotation.SdkConstant;
23 import android.annotation.SdkConstant.SdkConstantType;
24 import android.compat.annotation.UnsupportedAppUsage;
25 import android.content.ComponentName;
26 import android.content.ContentResolver;
27 import android.content.Context;
28 import android.content.Intent;
29 import android.content.ServiceConnection;
30 import android.media.AudioAttributes;
31 import android.media.AudioManager;
32 import android.net.Uri;
33 import android.os.AsyncTask;
34 import android.os.Bundle;
35 import android.os.IBinder;
36 import android.os.ParcelFileDescriptor;
37 import android.os.RemoteException;
38 import android.os.ServiceManager;
39 import android.text.TextUtils;
40 import android.util.Log;
41 
42 import java.io.File;
43 import java.io.FileNotFoundException;
44 import java.io.IOException;
45 import java.lang.annotation.Retention;
46 import java.lang.annotation.RetentionPolicy;
47 import java.util.Collections;
48 import java.util.HashMap;
49 import java.util.HashSet;
50 import java.util.List;
51 import java.util.Locale;
52 import java.util.Map;
53 import java.util.MissingResourceException;
54 import java.util.Set;
55 import java.util.concurrent.Executor;
56 
57 /**
58  *
59  * Synthesizes speech from text for immediate playback or to create a sound file.
60  * <p>A TextToSpeech instance can only be used to synthesize text once it has completed its
61  * initialization. Implement the {@link TextToSpeech.OnInitListener} to be
62  * notified of the completion of the initialization.<br>
63  * When you are done using the TextToSpeech instance, call the {@link #shutdown()} method
64  * to release the native resources used by the TextToSpeech engine.
65  *
66  * Apps targeting Android 11 that use text-to-speech should declare {@link
67  * TextToSpeech.Engine#INTENT_ACTION_TTS_SERVICE} in the {@code queries} elements of their
68  * manifest:
69  *
70  * <pre>
71  * &lt;queries&gt;
72  *   ...
73  *  &lt;intent&gt;
74  *      &lt;action android:name="android.intent.action.TTS_SERVICE" /&gt;
75  *  &lt;/intent&gt;
76  * &lt;/queries&gt;
77  * </pre>
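 *
 * <p>A minimal usage sketch (illustrative only; {@code mTts} is assumed to be a field of the
 * calling Activity or Service):
 *
 * <pre>{@code
 * mTts = new TextToSpeech(context, status -> {
 *     if (status == TextToSpeech.SUCCESS) {
 *         mTts.setLanguage(Locale.US);
 *         mTts.speak("Hello", TextToSpeech.QUEUE_FLUSH, null, "utterance-1");
 *     }
 * });
 * // ... when the instance is no longer needed:
 * mTts.shutdown();
 * }</pre>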
78  */
79 public class TextToSpeech {
80 
81     private static final String TAG = "TextToSpeech";
82 
83     /**
84      * Denotes a successful operation.
85      */
86     public static final int SUCCESS = 0;
87     /**
88      * Denotes a generic operation failure.
89      */
90     public static final int ERROR = -1;
91 
92     /**
93      * Denotes a stop requested by a client. It's used only on the service side of the API;
94      * clients should never expect to see this result code.
95      */
96     public static final int STOPPED = -2;
97 
98     /** @hide */
99     @IntDef(prefix = { "ERROR_" }, value = {
100             ERROR_SYNTHESIS,
101             ERROR_SERVICE,
102             ERROR_OUTPUT,
103             ERROR_NETWORK,
104             ERROR_NETWORK_TIMEOUT,
105             ERROR_INVALID_REQUEST,
106             ERROR_NOT_INSTALLED_YET
107     })
108     @Retention(RetentionPolicy.SOURCE)
109     public @interface Error {}
110 
111     /**
112      * Denotes a failure of a TTS engine to synthesize the given input.
113      */
114     public static final int ERROR_SYNTHESIS = -3;
115 
116     /**
117      * Denotes a failure of a TTS service.
118      */
119     public static final int ERROR_SERVICE = -4;
120 
121     /**
122      * Denotes a failure related to the output (audio device or a file).
123      */
124     public static final int ERROR_OUTPUT = -5;
125 
126     /**
127      * Denotes a failure caused by network connectivity problems.
128      */
129     public static final int ERROR_NETWORK = -6;
130 
131     /**
132      * Denotes a failure caused by a network timeout.
133      */
134     public static final int ERROR_NETWORK_TIMEOUT = -7;
135 
136     /**
137      * Denotes a failure caused by an invalid request.
138      */
139     public static final int ERROR_INVALID_REQUEST = -8;
140 
141     /**
142      * Denotes a failure caused by an unfinished download of the voice data.
143      * @see Engine#KEY_FEATURE_NOT_INSTALLED
144      */
145     public static final int ERROR_NOT_INSTALLED_YET = -9;
146 
147     /**
148      * Queue mode where all entries in the playback queue (media to be played
149      * and text to be synthesized) are dropped and replaced by the new entry.
150      * Queues are flushed with respect to a given calling app. Entries in the queue
151      * from other callers are not discarded.
152      */
153     public static final int QUEUE_FLUSH = 0;
154     /**
155      * Queue mode where the new entry is added at the end of the playback queue.
156      */
157     public static final int QUEUE_ADD = 1;
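    // Illustrative sketch (editor's addition, assuming "tts" is an initialized TextToSpeech
    // instance): with QUEUE_ADD the second utterance plays after the first; a QUEUE_FLUSH
    // call from the same app would drop both pending entries.
    //
    //     tts.speak("First sentence", TextToSpeech.QUEUE_FLUSH, null, "utt-1");
    //     tts.speak("Second sentence", TextToSpeech.QUEUE_ADD, null, "utt-2");
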
158     /**
159      * Queue mode where the entire playback queue is purged. This is different
160      * from {@link #QUEUE_FLUSH} in that all entries are purged, not just entries
161      * from a given caller.
162      *
163      * @hide
164      */
165     static final int QUEUE_DESTROY = 2;
166 
167     /**
168      * Denotes the language is available exactly as specified by the locale.
169      */
170     public static final int LANG_COUNTRY_VAR_AVAILABLE = 2;
171 
172     /**
173      * Denotes the language is available for the language and country specified
174      * by the locale, but not the variant.
175      */
176     public static final int LANG_COUNTRY_AVAILABLE = 1;
177 
178     /**
179      * Denotes the language is available for the language specified by the locale,
180      * but not the country and variant.
181      */
182     public static final int LANG_AVAILABLE = 0;
183 
184     /**
185      * Denotes the language data is missing.
186      */
187     public static final int LANG_MISSING_DATA = -1;
188 
189     /**
190      * Denotes the language is not supported.
191      */
192     public static final int LANG_NOT_SUPPORTED = -2;
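    // Illustrative sketch (editor's addition, assuming "tts" is an initialized TextToSpeech
    // instance): interpreting the codes returned by setLanguage()/isLanguageAvailable().
    //
    //     int result = tts.setLanguage(Locale.FRANCE);
    //     if (result == TextToSpeech.LANG_MISSING_DATA
    //             || result == TextToSpeech.LANG_NOT_SUPPORTED) {
    //         // Fall back to another locale or prompt the user to install voice data.
    //     }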
193 
194     /**
195      * Broadcast Action: The TextToSpeech synthesizer has completed processing
196      * of all the text in the speech queue.
197      *
198      * Note that this notifies callers when the <b>engine</b> has finished
199      * processing text data. Audio playback might not have completed (or even started)
200      * at this point. If you wish to be notified when this happens, see
201      * {@link OnUtteranceCompletedListener}.
202      */
203     @SdkConstant(SdkConstantType.BROADCAST_INTENT_ACTION)
204     public static final String ACTION_TTS_QUEUE_PROCESSING_COMPLETED =
205             "android.speech.tts.TTS_QUEUE_PROCESSING_COMPLETED";
206 
207     /**
208      * Interface definition of a callback to be invoked indicating the completion of the
209      * TextToSpeech engine initialization.
210      */
211     public interface OnInitListener {
212         /**
213          * Called to signal the completion of the TextToSpeech engine initialization.
214          *
215          * @param status {@link TextToSpeech#SUCCESS} or {@link TextToSpeech#ERROR}.
216          */
217         void onInit(int status);
218     }
219 
220     /**
221      * Listener that will be called when the TTS service has
222      * completed synthesizing an utterance. This is only called if the utterance
223      * has an utterance ID (see {@link TextToSpeech.Engine#KEY_PARAM_UTTERANCE_ID}).
224      *
225      * @deprecated Use {@link UtteranceProgressListener} instead.
226      */
227     @Deprecated
228     public interface OnUtteranceCompletedListener {
229         /**
230          * Called when an utterance has been synthesized.
231          *
232          * @param utteranceId the identifier of the utterance.
233          */
234         void onUtteranceCompleted(String utteranceId);
235     }
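    // Editor's sketch of the recommended replacement (assuming an initialized "tts"):
    // UtteranceProgressListener reports start, completion and errors per utterance.
    //
    //     tts.setOnUtteranceProgressListener(new UtteranceProgressListener() {
    //         @Override public void onStart(String utteranceId) { }
    //         @Override public void onDone(String utteranceId) { }
    //         @Override public void onError(String utteranceId) { }
    //     });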
236 
237     /**
238      * Constants and parameter names for controlling text-to-speech. These include:
239      *
240      * <ul>
241      *     <li>
242      *         Intents to ask engine to install data or check its data and
243      *         extras for a TTS engine's check data activity.
244      *     </li>
245      *     <li>
246      *         Keys for the parameters passed with speak commands, e.g.
247      *         {@link Engine#KEY_PARAM_UTTERANCE_ID}, {@link Engine#KEY_PARAM_STREAM}.
248      *     </li>
249      *     <li>
250      *         A list of feature strings that engines might support, e.g.
251      *         {@link Engine#KEY_FEATURE_NETWORK_SYNTHESIS}. These values may be passed in to
252      *         {@link TextToSpeech#speak} and {@link TextToSpeech#synthesizeToFile} to modify
253      *         engine behaviour. The engine can be queried for the set of features it supports
254      *         through {@link TextToSpeech#getFeatures(java.util.Locale)}.
255      *     </li>
256      * </ul>
257      *
258      * Apps targeting Android 11 that use text-to-speech should declare {@link
259      * TextToSpeech.Engine#INTENT_ACTION_TTS_SERVICE} in the {@code queries} elements of their
260      * manifest:
261      *
262      * <pre>
263      * &lt;queries&gt;
264      *   ...
265      *  &lt;intent&gt;
266      *      &lt;action android:name="android.intent.action.TTS_SERVICE" /&gt;
267      *  &lt;/intent&gt;
268      * &lt;/queries&gt;
269      * </pre>
270      */
271     public class Engine {
272 
273         /**
274          * Default speech rate.
275          * @hide
276          */
277         public static final int DEFAULT_RATE = 100;
278 
279         /**
280          * Default pitch.
281          * @hide
282          */
283         public static final int DEFAULT_PITCH = 100;
284 
285         /**
286          * Default volume.
287          * @hide
288          */
289         public static final float DEFAULT_VOLUME = 1.0f;
290 
291         /**
292          * Default pan (centered).
293          * @hide
294          */
295         public static final float DEFAULT_PAN = 0.0f;
296 
297         /**
298          * Default value for {@link Settings.Secure#TTS_USE_DEFAULTS}.
299          * @hide
300          */
301         public static final int USE_DEFAULTS = 0; // false
302 
303         /**
304          * Package name of the default TTS engine.
305          *
306          * @hide
307          * @deprecated No longer in use, the default engine is determined by
308          *         the sort order defined in {@link TtsEngines}. Note that
309          *         this doesn't "break" anything because there is no guarantee that
310          *         the engine specified below is installed on a given build, let
311          *         alone be the default.
312          */
313         @Deprecated
314         public static final String DEFAULT_ENGINE = "com.svox.pico";
315 
316         /**
317          * Default audio stream used when playing synthesized speech.
318          */
319         public static final int DEFAULT_STREAM = AudioManager.STREAM_MUSIC;
320 
321         /**
322          * Indicates success when checking the installation status of the resources used by the
323          * TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
324          */
325         public static final int CHECK_VOICE_DATA_PASS = 1;
326 
327         /**
328          * Indicates failure when checking the installation status of the resources used by the
329          * TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
330          */
331         public static final int CHECK_VOICE_DATA_FAIL = 0;
332 
333         /**
334          * Indicates erroneous data when checking the installation status of the resources used by
335          * the TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
336          *
337          * @deprecated Use CHECK_VOICE_DATA_FAIL instead.
338          */
339         @Deprecated
340         public static final int CHECK_VOICE_DATA_BAD_DATA = -1;
341 
342         /**
343          * Indicates missing resources when checking the installation status of the resources used
344          * by the TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
345          *
346          * @deprecated Use CHECK_VOICE_DATA_FAIL instead.
347          */
348         @Deprecated
349         public static final int CHECK_VOICE_DATA_MISSING_DATA = -2;
350 
351         /**
352          * Indicates missing storage volume when checking the installation status of the resources
353          * used by the TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
354          *
355          * @deprecated Use CHECK_VOICE_DATA_FAIL instead.
356          */
357         @Deprecated
358         public static final int CHECK_VOICE_DATA_MISSING_VOLUME = -3;
359 
360         /**
361          * Intent for starting a TTS service. Services that handle this intent must
362          * extend {@link TextToSpeechService}. Normal applications should not use this intent
363          * directly; instead they should talk to the TTS service using the methods in this
364          * class.
365          */
366         @SdkConstant(SdkConstantType.SERVICE_ACTION)
367         public static final String INTENT_ACTION_TTS_SERVICE =
368                 "android.intent.action.TTS_SERVICE";
369 
370         /**
371          * Name under which a text to speech engine publishes information about itself.
372          * This meta-data should reference an XML resource containing a
373          * <code>&lt;{@link android.R.styleable#TextToSpeechEngine tts-engine}&gt;</code>
374          * tag.
375          */
376         public static final String SERVICE_META_DATA = "android.speech.tts";
377 
378         // intents to ask engine to install data or check its data
379         /**
380          * Activity Action: Triggers the platform TextToSpeech engine to
381          * start the activity that installs the resource files on the device
382          * that are required for TTS to be operational. Since the installation
383          * of the data can be interrupted or declined by the user, the application
384          * shouldn't expect successful installation upon return from that intent,
385          * and if need be, should check installation status with
386          * {@link #ACTION_CHECK_TTS_DATA}.
387          */
388         @SdkConstant(SdkConstantType.ACTIVITY_INTENT_ACTION)
389         public static final String ACTION_INSTALL_TTS_DATA =
390                 "android.speech.tts.engine.INSTALL_TTS_DATA";
391 
392         /**
393          * Broadcast Action: broadcast to signal the change in the list of available
394          * languages and/or their features.
395          */
396         @SdkConstant(SdkConstantType.BROADCAST_INTENT_ACTION)
397         public static final String ACTION_TTS_DATA_INSTALLED =
398                 "android.speech.tts.engine.TTS_DATA_INSTALLED";
399 
400         /**
401          * Activity Action: Starts the activity from the platform TextToSpeech
402          * engine to verify the proper installation and availability of the
403          * resource files on the system. Upon completion, the activity will
404          * return one of the following codes:
405          * {@link #CHECK_VOICE_DATA_PASS},
406          * {@link #CHECK_VOICE_DATA_FAIL}.
407          * <p> Moreover, the data received in the activity result will contain the following
408          * fields:
409          * <ul>
410          *   <li>{@link #EXTRA_AVAILABLE_VOICES} which contains an ArrayList<String> of all the
411          *   available voices. The format of each voice is: lang-COUNTRY-variant where COUNTRY and
412      *   variant are optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").</li>
413          *   <li>{@link #EXTRA_UNAVAILABLE_VOICES} which contains an ArrayList<String> of all the
414          *   unavailable voices (ones that user can install). The format of each voice is:
415      *   lang-COUNTRY-variant where COUNTRY and variant are optional (i.e., "eng" or
416          *   "eng-USA" or "eng-USA-FEMALE").</li>
417          * </ul>
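         *
         * <p>A sketch of a typical check (illustrative only; {@code CHECK_TTS_REQUEST_CODE} is a
         * hypothetical request code chosen by the caller):
         *
         * <pre>{@code
         * Intent checkIntent = new Intent(TextToSpeech.Engine.ACTION_CHECK_TTS_DATA);
         * startActivityForResult(checkIntent, CHECK_TTS_REQUEST_CODE);
         *
         * // In onActivityResult:
         * if (resultCode == TextToSpeech.Engine.CHECK_VOICE_DATA_PASS) {
         *     // Voice data is installed; it is safe to create a TextToSpeech instance.
         * } else {
         *     startActivity(new Intent(TextToSpeech.Engine.ACTION_INSTALL_TTS_DATA));
         * }
         * }</pre>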
418          */
419         @SdkConstant(SdkConstantType.ACTIVITY_INTENT_ACTION)
420         public static final String ACTION_CHECK_TTS_DATA =
421                 "android.speech.tts.engine.CHECK_TTS_DATA";
422 
423         /**
424          * Activity intent for getting some sample text to use for demonstrating TTS. A specific
425          * locale has to be requested by passing the following extra parameters:
426          * <ul>
427          *   <li>language</li>
428          *   <li>country</li>
429          *   <li>variant</li>
430          * </ul>
431          *
432          * Upon completion, the activity result may contain the following fields:
433          * <ul>
434          *   <li>{@link #EXTRA_SAMPLE_TEXT} which contains a String with sample text.</li>
435          * </ul>
436          */
437         @SdkConstant(SdkConstantType.ACTIVITY_INTENT_ACTION)
438         public static final String ACTION_GET_SAMPLE_TEXT =
439                 "android.speech.tts.engine.GET_SAMPLE_TEXT";
440 
441         /**
442          * Extra information received with the {@link #ACTION_GET_SAMPLE_TEXT} intent result where
443          * the TextToSpeech engine returns a String with sample text for the requested voice.
444          */
445         public static final String EXTRA_SAMPLE_TEXT = "sampleText";
446 
447 
448         // extras for a TTS engine's check data activity
449         /**
450          * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
451          * the TextToSpeech engine returns an ArrayList<String> of all the available voices.
452          * The format of each voice is: lang-COUNTRY-variant where COUNTRY and variant are
453          * optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").
454          */
455         public static final String EXTRA_AVAILABLE_VOICES = "availableVoices";
456 
457         /**
458          * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
459          * the TextToSpeech engine returns an ArrayList<String> of all the unavailable voices.
460          * The format of each voice is: lang-COUNTRY-variant where COUNTRY and variant are
461          * optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").
462          */
463         public static final String EXTRA_UNAVAILABLE_VOICES = "unavailableVoices";
464 
465         /**
466          * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
467          * the TextToSpeech engine specifies the path to its resources.
468          *
469          * It may be used by language packages to find out where to put their data.
470          *
471          * @deprecated TTS engine implementation detail, this information has no use for
472          * text-to-speech API clients.
473          */
474         @Deprecated
475         public static final String EXTRA_VOICE_DATA_ROOT_DIRECTORY = "dataRoot";
476 
477         /**
478          * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
479          * the TextToSpeech engine specifies the file names of its resources under the
480          * resource path.
481          *
482          * @deprecated TTS engine implementation detail, this information has no use for
483          * text-to-speech API clients.
484          */
485         @Deprecated
486         public static final String EXTRA_VOICE_DATA_FILES = "dataFiles";
487 
488         /**
489          * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
490          * the TextToSpeech engine specifies the locale associated with each resource file.
491          *
492          * @deprecated TTS engine implementation detail, this information has no use for
493          * text-to-speech API clients.
494          */
495         @Deprecated
496         public static final String EXTRA_VOICE_DATA_FILES_INFO = "dataFilesInfo";
497 
498         /**
499          * Extra information sent with the {@link #ACTION_CHECK_TTS_DATA} intent where the
500          * caller indicates to the TextToSpeech engine which specific sets of voice data to
501          * check for by sending an ArrayList<String> of the voices that are of interest.
502          * The format of each voice is: lang-COUNTRY-variant where COUNTRY and variant are
503          * optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").
504          *
505          * @deprecated Redundant functionality, checking for existence of specific sets of voice
506          * data can be done on the client side.
507          */
508         @Deprecated
509         public static final String EXTRA_CHECK_VOICE_DATA_FOR = "checkVoiceDataFor";
510 
511         // extras for a TTS engine's data installation
512         /**
513          * Extra information received with the {@link #ACTION_TTS_DATA_INSTALLED} intent result.
514          * It indicates whether the data files for the synthesis engine were successfully
515          * installed. The installation was initiated with the  {@link #ACTION_INSTALL_TTS_DATA}
516          * intent. The possible values for this extra are
517          * {@link TextToSpeech#SUCCESS} and {@link TextToSpeech#ERROR}.
518          *
519          * @deprecated No longer in use. If a client is interested in information about what
520          * changed, it should use the ACTION_CHECK_TTS_DATA
521          * intent to discover available voices.
522          */
523         @Deprecated
524         public static final String EXTRA_TTS_DATA_INSTALLED = "dataInstalled";
525 
526         // keys for the parameters passed with speak commands. Hidden keys are used internally
527         // to maintain engine state for each TextToSpeech instance.
528         /**
529          * @hide
530          */
531         public static final String KEY_PARAM_RATE = "rate";
532 
533         /**
534          * @hide
535          */
536         public static final String KEY_PARAM_VOICE_NAME = "voiceName";
537 
538         /**
539          * @hide
540          */
541         public static final String KEY_PARAM_LANGUAGE = "language";
542 
543         /**
544          * @hide
545          */
546         public static final String KEY_PARAM_COUNTRY = "country";
547 
548         /**
549          * @hide
550          */
551         public static final String KEY_PARAM_VARIANT = "variant";
552 
553         /**
554          * @hide
555          */
556         public static final String KEY_PARAM_ENGINE = "engine";
557 
558         /**
559          * @hide
560          */
561         public static final String KEY_PARAM_PITCH = "pitch";
562 
563         /**
564          * Parameter key to specify the audio stream type to be used when speaking text
565          * or playing back a file. The value should be one of the STREAM_ constants
566          * defined in {@link AudioManager}.
567          *
568          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
569          * @see TextToSpeech#playEarcon(String, int, HashMap)
570          */
571         public static final String KEY_PARAM_STREAM = "streamType";
572 
573         /**
574          * Parameter key to specify the audio attributes to be used when
575          * speaking text or playing back a file. The value should be set
576          * using {@link TextToSpeech#setAudioAttributes(AudioAttributes)}.
577          *
578          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
579          * @see TextToSpeech#playEarcon(String, int, HashMap)
580          * @hide
581          */
582         public static final String KEY_PARAM_AUDIO_ATTRIBUTES = "audioAttributes";
583 
584         /**
585          * Parameter key to identify an utterance in the
586          * {@link TextToSpeech.OnUtteranceCompletedListener} after text has been
587          * spoken, a file has been played back or a silence duration has elapsed.
588          *
589          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
590          * @see TextToSpeech#playEarcon(String, int, HashMap)
591          * @see TextToSpeech#synthesizeToFile(String, HashMap, String)
592          */
593         public static final String KEY_PARAM_UTTERANCE_ID = "utteranceId";
594 
595         /**
596          * Parameter key to specify the speech volume relative to the current stream type
597          * volume used when speaking text. Volume is specified as a float ranging from 0 to 1
598          * where 0 is silence, and 1 is the maximum volume (the default behavior).
599          *
600          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
601          * @see TextToSpeech#playEarcon(String, int, HashMap)
602          */
603         public static final String KEY_PARAM_VOLUME = "volume";
604 
605         /**
606          * Parameter key to specify how the speech is panned from left to right when speaking text.
607          * Pan is specified as a float ranging from -1 to +1 where -1 maps to a hard-left pan,
608          * 0 to center (the default behavior), and +1 to hard-right.
609          *
610          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
611          * @see TextToSpeech#playEarcon(String, int, HashMap)
612          */
613         public static final String KEY_PARAM_PAN = "pan";
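        // Illustrative sketch (editor's addition, assuming an initialized "tts"): passing
        // stream, volume and pan parameters to speak().
        //
        //     Bundle params = new Bundle();
        //     params.putInt(TextToSpeech.Engine.KEY_PARAM_STREAM, AudioManager.STREAM_ALARM);
        //     params.putFloat(TextToSpeech.Engine.KEY_PARAM_VOLUME, 0.5f);
        //     params.putFloat(TextToSpeech.Engine.KEY_PARAM_PAN, -1.0f);
        //     tts.speak("Wake up", TextToSpeech.QUEUE_FLUSH, params, "alarm-1");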
614 
615         /**
616          * Feature key for network synthesis. See {@link TextToSpeech#getFeatures(Locale)}
617          * for a description of how feature keys work. If set (and supported by the engine
618          * as per {@link TextToSpeech#getFeatures(Locale)}), the engine must
619          * use network-based synthesis.
620          *
621          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
622          * @see TextToSpeech#synthesizeToFile(String, java.util.HashMap, String)
623          * @see TextToSpeech#getFeatures(java.util.Locale)
624          *
625          * @deprecated Starting from API level 21, to select network synthesis, call
626          * {@link TextToSpeech#getVoices()}, find a suitable network voice
627          * ({@link Voice#isNetworkConnectionRequired()}) and pass it
628          * to {@link TextToSpeech#setVoice(Voice)}.
629          */
630         @Deprecated
631         public static final String KEY_FEATURE_NETWORK_SYNTHESIS = "networkTts";
632 
633         /**
634          * Feature key for embedded synthesis. See {@link TextToSpeech#getFeatures(Locale)}
635          * for a description of how feature keys work. If set and supported by the engine
636          * as per {@link TextToSpeech#getFeatures(Locale)}, the engine must synthesize
637          * text on-device (without making network requests).
638          *
639          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
640          * @see TextToSpeech#synthesizeToFile(String, java.util.HashMap, String)
641          * @see TextToSpeech#getFeatures(java.util.Locale)
642          *
643          * @deprecated Starting from API level 21, to select embedded synthesis, call
644          * {@link TextToSpeech#getVoices()}, find a suitable embedded voice
645          * ({@link Voice#isNetworkConnectionRequired()}) and pass it
646          * to {@link TextToSpeech#setVoice(Voice)}.
647          */
648         @Deprecated
649         public static final String KEY_FEATURE_EMBEDDED_SYNTHESIS = "embeddedTts";
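        // Editor's sketch of the API level 21+ replacement for these feature keys (assuming an
        // initialized "tts"): pick a voice by its network requirement and pass it to setVoice().
        //
        //     for (Voice v : tts.getVoices()) {
        //         if (v.getLocale().equals(Locale.US) && !v.isNetworkConnectionRequired()) {
        //             tts.setVoice(v); // embedded (on-device) voice
        //             break;
        //         }
        //     }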
650 
651         /**
652          * Parameter key to specify an audio session identifier (obtained from
653          * {@link AudioManager#generateAudioSessionId()}) that will be used by the request's audio
654          * output. It can be used to associate one of the {@link android.media.audiofx.AudioEffect}
655          * objects with the synthesis (or earcon) output.
656          *
657          * @see TextToSpeech#speak(CharSequence, int, Bundle, String)
658          * @see TextToSpeech#playEarcon(String, int, HashMap)
659          */
660         public static final String KEY_PARAM_SESSION_ID = "sessionId";
661 
662         /**
663          * Feature key that indicates that the voice may need to download additional data to be fully
664          * functional. The download will be triggered by calling
665          * {@link TextToSpeech#setVoice(Voice)} or {@link TextToSpeech#setLanguage(Locale)}.
666      * Until the download is complete, each synthesis request will either report an
667          * {@link TextToSpeech#ERROR_NOT_INSTALLED_YET} error, or use a different voice to synthesize
668          * the request. This feature should NOT be used as a key of a request parameter.
669          *
670          * @see TextToSpeech#getFeatures(java.util.Locale)
671          * @see Voice#getFeatures()
672          */
673         public static final String KEY_FEATURE_NOT_INSTALLED = "notInstalled";
674 
675         /**
676          * Feature key that indicates that a network timeout can be set for the request. If set and
677          * supported as per {@link TextToSpeech#getFeatures(Locale)} or {@link Voice#getFeatures()},
678          * it can be used as a request parameter to set the maximum allowed time for a single
679          * request attempt, in milliseconds, before synthesis fails. When used as a key of
680          * a request parameter, its value should be a string with an integer value.
681          *
682          * @see TextToSpeech#getFeatures(java.util.Locale)
683          * @see Voice#getFeatures()
684          */
685         public static final String KEY_FEATURE_NETWORK_TIMEOUT_MS = "networkTimeoutMs";
686 
687         /**
688          * Feature key that indicates that a network request retry count can be set for the request.
689          * If set and supported as per {@link TextToSpeech#getFeatures(Locale)} or
690          * {@link Voice#getFeatures()}, it can be used as a request parameter to set the
691          * number of network request retries that are attempted in case of failure. When used as
692          * a key of a request parameter, its value should be a string with an integer value.
693          *
694          * @see TextToSpeech#getFeatures(java.util.Locale)
695          * @see Voice#getFeatures()
696          */
697         public static final String KEY_FEATURE_NETWORK_RETRIES_COUNT = "networkRetriesCount";
698     }
699 
700     private static final boolean DEBUG = false;
701 
702     private final Context mContext;
703     @UnsupportedAppUsage
704     private Connection mConnectingServiceConnection;
705     private Connection mServiceConnection;
706     @UnsupportedAppUsage
707     private OnInitListener mInitListener;
708     // Written from an unspecified application thread, read from
709     // a binder thread.
710     @Nullable private volatile UtteranceProgressListener mUtteranceProgressListener;
711     private final Object mStartLock = new Object();
712 
713     private String mRequestedEngine;
714     // Whether to initialize this TTS object with the default engine,
715     // if the requested engine is not available. Valid only if mRequestedEngine
716     // is not null. Used only for testing, though potentially useful API wise
717     // too.
718     private final boolean mUseFallback;
719     private final Map<String, Uri> mEarcons;
720     private final Map<CharSequence, Uri> mUtterances;
721     private final Bundle mParams = new Bundle();
722     private final TtsEngines mEnginesHelper;
723     private final boolean mIsSystem;
724     @Nullable private final Executor mInitExecutor;
725 
726     @UnsupportedAppUsage
727     private volatile String mCurrentEngine = null;
728 
729     /**
730      * The constructor for the TextToSpeech class, using the default TTS engine.
731      * This will also initialize the associated TextToSpeech engine if it isn't already running.
732      *
733      * @param context
734      *            The context this instance is running in.
735      * @param listener
736      *            The {@link TextToSpeech.OnInitListener} that will be called when the
737      *            TextToSpeech engine has initialized. In case of a failure the listener
738      *            may be called immediately, before the TextToSpeech instance is fully constructed.
739      */
740     public TextToSpeech(Context context, OnInitListener listener) {
741         this(context, listener, null);
742     }
743 
744     /**
745      * The constructor for the TextToSpeech class, using the given TTS engine.
746      * This will also initialize the associated TextToSpeech engine if it isn't already running.
747      *
748      * @param context
749      *            The context this instance is running in.
750      * @param listener
751      *            The {@link TextToSpeech.OnInitListener} that will be called when the
752      *            TextToSpeech engine has initialized. In case of a failure the listener
753      *            may be called immediately, before the TextToSpeech instance is fully constructed.
754      * @param engine Package name of the TTS engine to use.
755      */
756     public TextToSpeech(Context context, OnInitListener listener, String engine) {
757         this(context, listener, engine, null, true);
758     }
759 
760     /**
761      * Used by the framework to instantiate TextToSpeech objects with a supplied
762      * package name, instead of using {@link android.content.Context#getPackageName()}
763      *
764      * @hide
765      */
766     public TextToSpeech(Context context, OnInitListener listener, String engine,
767             String packageName, boolean useFallback) {
768         this(context, /* initExecutor= */ null, listener, engine, packageName,
769                 useFallback, /* isSystem= */ true);
770     }
771 
772     /**
773      * Used internally to instantiate TextToSpeech objects.
774      *
775      * @hide
776      */
777     private TextToSpeech(Context context, @Nullable Executor initExecutor,
778             OnInitListener initListener, String engine, String packageName, boolean useFallback,
779             boolean isSystem) {
780         mContext = context;
781         mInitExecutor = initExecutor;
782         mInitListener = initListener;
783         mRequestedEngine = engine;
784         mUseFallback = useFallback;
785 
786         mEarcons = new HashMap<String, Uri>();
787         mUtterances = new HashMap<CharSequence, Uri>();
788         mUtteranceProgressListener = null;
789 
790         mEnginesHelper = new TtsEngines(mContext);
791 
792         mIsSystem = isSystem;
793 
794         initTts();
795     }
796 
797     private <R> R runActionNoReconnect(Action<R> action, R errorResult, String method,
798             boolean onlyEstablishedConnection) {
799         return runAction(action, errorResult, method, false, onlyEstablishedConnection);
800     }
801 
802     private <R> R runAction(Action<R> action, R errorResult, String method) {
803         return runAction(action, errorResult, method, true, true);
804     }
805 
806     private <R> R runAction(Action<R> action, R errorResult, String method,
807             boolean reconnect, boolean onlyEstablishedConnection) {
808         synchronized (mStartLock) {
809             if (mServiceConnection == null) {
810                 Log.w(TAG, method + " failed: not bound to TTS engine");
811                 return errorResult;
812             }
813             return mServiceConnection.runAction(action, errorResult, method, reconnect,
814                     onlyEstablishedConnection);
815         }
816     }
817 
818     private int initTts() {
819         // Step 1: Try connecting to the engine that was requested.
820         if (mRequestedEngine != null) {
821             if (mEnginesHelper.isEngineInstalled(mRequestedEngine)) {
822                 if (connectToEngine(mRequestedEngine)) {
823                     mCurrentEngine = mRequestedEngine;
824                     return SUCCESS;
825                 } else if (!mUseFallback) {
826                     mCurrentEngine = null;
827                     dispatchOnInit(ERROR);
828                     return ERROR;
829                 }
830             } else if (!mUseFallback) {
831                 Log.i(TAG, "Requested engine not installed: " + mRequestedEngine);
832                 mCurrentEngine = null;
833                 dispatchOnInit(ERROR);
834                 return ERROR;
835             }
836         }
837 
838         // Step 2: Try connecting to the user's default engine.
839         final String defaultEngine = getDefaultEngine();
840         if (defaultEngine != null && !defaultEngine.equals(mRequestedEngine)) {
841             if (connectToEngine(defaultEngine)) {
842                 mCurrentEngine = defaultEngine;
843                 return SUCCESS;
844             }
845         }
846 
847         // Step 3: Try connecting to the highest ranked engine in the
848         // system.
849         final String highestRanked = mEnginesHelper.getHighestRankedEngineName();
850         if (highestRanked != null && !highestRanked.equals(mRequestedEngine) &&
851                 !highestRanked.equals(defaultEngine)) {
852             if (connectToEngine(highestRanked)) {
853                 mCurrentEngine = highestRanked;
854                 return SUCCESS;
855             }
856         }
857 
858         // NOTE: The API currently does not allow the caller to query whether
859         // they are actually connected to any engine. This might fail for various
860         // reasons like if the user disables all their TTS engines.
861 
862         mCurrentEngine = null;
863         dispatchOnInit(ERROR);
864         return ERROR;
865     }
866 
867     private boolean connectToEngine(String engine) {
868         Connection connection;
869         if (mIsSystem) {
870             connection = new SystemConnection();
871         } else {
872             connection = new DirectConnection();
873         }
874 
875         boolean bound = connection.connect(engine);
876         if (!bound) {
877             Log.e(TAG, "Failed to bind to " + engine);
878             return false;
879         } else {
880             Log.i(TAG, "Sucessfully bound to " + engine);
881             mConnectingServiceConnection = connection;
882             return true;
883         }
884     }
885 
886     private void dispatchOnInit(int result) {
887         Runnable onInitCommand = () -> {
888             synchronized (mStartLock) {
889                 if (mInitListener != null) {
890                     mInitListener.onInit(result);
891                     mInitListener = null;
892                 }
893             }
894         };
895 
896         if (mInitExecutor != null) {
897             mInitExecutor.execute(onInitCommand);
898         } else {
899             onInitCommand.run();
900         }
901     }
902 
903     private IBinder getCallerIdentity() {
904         return mServiceConnection.getCallerIdentity();
905     }
906 
907     /**
908      * Releases the resources used by the TextToSpeech engine.
909      * It is good practice, for instance, to call this method in the onDestroy() method of an Activity
910      * so the TextToSpeech engine can be cleanly stopped.
911      */
912     public void shutdown() {
913         // Special case: we are asked to shut down a connection that has not yet finalized.
914         synchronized (mStartLock) {
915             if (mConnectingServiceConnection != null) {
916                 mConnectingServiceConnection.disconnect();
917                 mConnectingServiceConnection = null;
918                 return;
919             }
920         }
921 
922         // Post connection case
923         runActionNoReconnect((ITextToSpeechService service) -> {
924             service.setCallback(getCallerIdentity(), null);
925             service.stop(getCallerIdentity());
926             mServiceConnection.disconnect();
927             // Context#unbindService does not result in a call to
928             // ServiceConnection#onServiceDisconnected. As a result, the
929             // service ends up being destroyed (if there are no other open
930             // connections to it) but the process lives on and the
931             // ServiceConnection continues to refer to the destroyed service.
932             //
933             // This leads to tons of log spam about SynthThread being dead.
934             mServiceConnection = null;
935             mCurrentEngine = null;
936             return null;
937         }, null, "shutdown", false);
938     }
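    // Editor's sketch of the pattern suggested above (assuming "mTts" is a field of the
    // Activity holding this instance): release the engine when the Activity is destroyed.
    //
    //     @Override
    //     protected void onDestroy() {
    //         if (mTts != null) {
    //             mTts.shutdown();
    //         }
    //         super.onDestroy();
    //     }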
939 
940     /**
941      * Adds a mapping between a string of text and a sound resource in a
942      * package. After a call to this method, subsequent calls to
943      * {@link #speak(CharSequence, int, Bundle, String)} will play the specified sound resource
944      * if it is available, or synthesize the text if it is missing.
945      *
946      * @param text
947      *            The string of text. Example: <code>"south_south_east"</code>
948      *
949      * @param packagename
950      *            Pass the packagename of the application that contains the
951      *            resource. If the resource is in your own application (this is
952      *            the most common case), then put the packagename of your
953      *            application here.<br/>
954      *            Example: <b>"com.google.marvin.compass"</b><br/>
955      *            The packagename can be found in the AndroidManifest.xml of
956      *            your application.
957      *            <p>
958      *            <code>&lt;manifest xmlns:android=&quot;...&quot;
959      *      package=&quot;<b>com.google.marvin.compass</b>&quot;&gt;</code>
960      *            </p>
961      *
962      * @param resourceId
963      *            Example: <code>R.raw.south_south_east</code>
964      *
965      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
966      */
967     public int addSpeech(String text, String packagename, @RawRes int resourceId) {
968         return addSpeech(text, makeResourceUri(packagename, resourceId));
969     }
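    // Illustrative sketch (editor's addition, reusing the documentation's example names):
    // once the mapping is registered, speaking the same string plays the bundled sound
    // instead of synthesizing it.
    //
    //     tts.addSpeech("south_south_east", "com.google.marvin.compass", R.raw.south_south_east);
    //     tts.speak("south_south_east", TextToSpeech.QUEUE_ADD, null, "compass-1");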
970 
971     /**
972      * Adds a mapping between a CharSequence (may be spanned with TtsSpans) of text
973      * and a sound resource in a package. After a call to this method, subsequent calls to
974      * {@link #speak(CharSequence, int, Bundle, String)} will play the specified sound resource
975      * if it is available, or synthesize the text if it is missing.
976      *
977      * @param text
978      *            The string of text. Example: <code>"south_south_east"</code>
979      *
980      * @param packagename
981      *            Pass the packagename of the application that contains the
982      *            resource. If the resource is in your own application (this is
983      *            the most common case), then put the packagename of your
984      *            application here.<br/>
985      *            Example: <b>"com.google.marvin.compass"</b><br/>
986      *            The packagename can be found in the AndroidManifest.xml of
987      *            your application.
988      *            <p>
989      *            <code>&lt;manifest xmlns:android=&quot;...&quot;
990      *      package=&quot;<b>com.google.marvin.compass</b>&quot;&gt;</code>
991      *            </p>
992      *
993      * @param resourceId
994      *            Example: <code>R.raw.south_south_east</code>
995      *
996      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
997      */
998     public int addSpeech(CharSequence text, String packagename, @RawRes int resourceId) {
999         return addSpeech(text, makeResourceUri(packagename, resourceId));
1000     }
1001 
1002     /**
1003      * Adds a mapping between a string of text and a sound file. Using this, it is possible to
1004      * add custom pronunciations for a string of text. After a call to this method, subsequent
1005      * calls to {@link #speak(CharSequence, int, Bundle, String)} will play the specified sound
1006      * resource if it is available, or synthesize the text if it is missing.
1007      *
1008      * @param text
1009      *            The string of text. Example: <code>"south_south_east"</code>
1010      * @param filename
1011      *            The full path to the sound file (for example:
1012      *            "/sdcard/mysounds/hello.wav")
1013      *
1014      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
1015      */
1016     public int addSpeech(String text, String filename) {
1017         return addSpeech(text, Uri.parse(filename));
1018     }
1019 
1020     /**
1021      * Adds a mapping between a CharSequence (may be spanned with TtsSpans) and a sound file.
1022      * Using this, it is possible to add custom pronunciations for a string of text. After a call
1023      * to this method, subsequent calls to {@link #speak(CharSequence, int, Bundle, String)}
1024      * will play the specified sound resource if it is available, or synthesize the text if it is
1025      * missing.
1026      *
1027      * @param text
1028      *            The string of text. Example: <code>"south_south_east"</code>
1029      * @param file
1030      *            File object pointing to the sound file.
1031      *
1032      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
1033      */
1034     public int addSpeech(CharSequence text, File file) {
1035         return addSpeech(text, Uri.fromFile(file));
1036     }
1037 
1038     /**
1039      * Adds a mapping between a CharSequence (may be spanned with TtsSpans) and a sound file.
1040      * Using this, it is possible to add custom pronunciations for a string of text. After a call
1041      * to this method, subsequent calls to {@link #speak(CharSequence, int, Bundle, String)}
1042      * will play the specified sound resource if it is available, or synthesize the text if it is
1043      * missing.
1044      *
1045      * @param text
1046      *            The string of text. Example: <code>"south_south_east"</code>
1047      * @param uri
1048      *            Uri object pointing to the sound file.
1049      *
1050      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
1051      */
1052     public int addSpeech(@NonNull CharSequence text, @NonNull Uri uri) {
1053         synchronized (mStartLock) {
1054             mUtterances.put(text, uri);
1055             return SUCCESS;
1056         }
1057     }
1058 
1059     /**
1060      * Adds a mapping between a string of text and a sound resource in a
1061      * package. Use this to add custom earcons.
1062      *
1063      * @see #playEarcon(String, int, HashMap)
1064      *
1065      * @param earcon The name of the earcon.
1066      *            Example: <code>"[tick]"</code><br/>
1067      *
1068      * @param packagename
1069      *            the package name of the application that contains the
1070      *            resource. This can for instance be the package name of your own application.
1071      *            Example: <b>"com.google.marvin.compass"</b><br/>
1072      *            The package name can be found in the AndroidManifest.xml of
1073      *            the application containing the resource.
1074      *            <p>
1075      *            <code>&lt;manifest xmlns:android=&quot;...&quot;
1076      *      package=&quot;<b>com.google.marvin.compass</b>&quot;&gt;</code>
1077      *            </p>
1078      *
1079      * @param resourceId
1080      *            Example: <code>R.raw.tick_snd</code>
1081      *
1082      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
1083      */
1084     public int addEarcon(String earcon, String packagename, @RawRes int resourceId) {
1085         return addEarcon(earcon, makeResourceUri(packagename, resourceId));
1086     }
1087 
1088     /**
1089      * Adds a mapping between a string of text and a sound file.
1090      * Use this to add custom earcons.
1091      *
1092      * @see #playEarcon(String, int, HashMap)
1093      *
1094      * @param earcon
1095      *            The name of the earcon.
1096      *            Example: <code>"[tick]"</code>
1097      * @param filename
1098      *            The full path to the sound file (for example:
1099      *            "/sdcard/mysounds/tick.wav")
1100      *
1101      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
1102      *
1103      * @deprecated As of API level 21, replaced by
1104      *         {@link #addEarcon(String, File)}.
1105      */
1106     @Deprecated
1107     public int addEarcon(String earcon, String filename) {
1108         return addEarcon(earcon, Uri.parse(filename));
1109     }
1110 
1111     /**
1112      * Adds a mapping between a string of text and a sound file.
1113      * Use this to add custom earcons.
1114      *
1115      * @see #playEarcon(String, int, HashMap)
1116      *
1117      * @param earcon
1118      *            The name of the earcon.
1119      *            Example: <code>"[tick]"</code>
1120      * @param file
1121      *            File object pointing to the sound file.
1122      *
1123      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
1124      */
1125     public int addEarcon(String earcon, File file) {
1126         return addEarcon(earcon, Uri.fromFile(file));
1127     }
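    // Illustrative sketch (editor's addition; the path is hypothetical and "tts" is an
    // initialized instance): register an earcon from a file, then play it with playEarcon().
    //
    //     tts.addEarcon("[tick]", new File("/sdcard/mysounds/tick.wav"));
    //     tts.playEarcon("[tick]", TextToSpeech.QUEUE_ADD, null, "tick-1");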
1128 
1129     /**
1130      * Adds a mapping between a string of text and a sound file.
1131      * Use this to add custom earcons.
1132      *
1133      * @see #playEarcon(String, int, HashMap)
1134      *
1135      * @param earcon
1136      *            The name of the earcon.
1137      *            Example: <code>"[tick]"</code>
1138      * @param uri
1139      *            Uri object pointing to the sound file.
1140      *
1141      * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
1142      */
1143     public int addEarcon(@NonNull String earcon, @NonNull Uri uri) {
1144         synchronized(mStartLock) {
1145             mEarcons.put(earcon, uri);
1146             return SUCCESS;
1147         }
1148     }
1149 
1150     private Uri makeResourceUri(String packageName, int resourceId) {
1151         return new Uri.Builder()
1152                 .scheme(ContentResolver.SCHEME_ANDROID_RESOURCE)
1153                 .encodedAuthority(packageName)
1154                 .appendEncodedPath(String.valueOf(resourceId))
1155                 .build();
1156     }
1157 
1158     /**
1159      * Speaks the text using the specified queuing strategy and speech parameters; the text may
1160      * be spanned with TtsSpans.
1161      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1162      * requests and then returns. The synthesis might not have finished (or even started!) at the
1163      * time when this method returns. In order to reliably detect errors during synthesis,
1164      * we recommend setting an utterance progress listener (see
1165      * {@link #setOnUtteranceProgressListener}) and using the
1166      * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1167      *
1168      * @param text The string of text to be spoken. No longer than
1169      *            {@link #getMaxSpeechInputLength()} characters.
1170      * @param queueMode The queuing strategy to use, {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1171      * @param params Parameters for the request. Can be null.
1172      *            Supported parameter names:
1173      *            {@link Engine#KEY_PARAM_STREAM},
1174      *            {@link Engine#KEY_PARAM_VOLUME},
1175      *            {@link Engine#KEY_PARAM_PAN}.
1176      *            Engine specific parameters may be passed in but the parameter keys
1177      *            must be prefixed by the name of the engine they are intended for. For example
1178      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1179      *            engine named "com.svox.pico" if it is being used.
1180      * @param utteranceId A unique identifier for this request.
1181      *
1182      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the speak operation.
1183      */
1184     public int speak(final CharSequence text,
1185                      final int queueMode,
1186                      final Bundle params,
1187                      final String utteranceId) {
1188         return runAction((ITextToSpeechService service) -> {
1189             Uri utteranceUri = mUtterances.get(text);
1190             if (utteranceUri != null) {
1191                 return service.playAudio(getCallerIdentity(), utteranceUri, queueMode,
1192                         getParams(params), utteranceId);
1193             } else {
1194                 return service.speak(getCallerIdentity(), text, queueMode, getParams(params),
1195                         utteranceId);
1196             }
1197         }, ERROR, "speak");
1198     }
1199 
1200     /**
1201      * Speaks the string using the specified queuing strategy and speech parameters.
1202      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1203      * requests and then returns. The synthesis might not have finished (or even started!) at the
1204      * time when this method returns. In order to reliably detect errors during synthesis,
1205      * we recommend setting an utterance progress listener (see
1206      * {@link #setOnUtteranceProgressListener}) and using the
1207      * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1208      *
1209      * @param text The string of text to be spoken. No longer than
1210      *            {@link #getMaxSpeechInputLength()} characters.
1211      * @param queueMode The queuing strategy to use, {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1212      * @param params Parameters for the request. Can be null.
1213      *            Supported parameter names:
1214      *            {@link Engine#KEY_PARAM_STREAM},
1215      *            {@link Engine#KEY_PARAM_UTTERANCE_ID},
1216      *            {@link Engine#KEY_PARAM_VOLUME},
1217      *            {@link Engine#KEY_PARAM_PAN}.
1218      *            Engine specific parameters may be passed in but the parameter keys
1219      *            must be prefixed by the name of the engine they are intended for. For example
1220      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1221      *            engine named "com.svox.pico" if it is being used.
1222      *
1223      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the speak operation.
1224      * @deprecated As of API level 21, replaced by
1225      *         {@link #speak(CharSequence, int, Bundle, String)}.
1226      */
1227     @Deprecated
1228     public int speak(final String text, final int queueMode, final HashMap<String, String> params) {
1229         return speak(text, queueMode, convertParamsHashMaptoBundle(params),
1230                      params == null ? null : params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1231     }
1232 
1233     /**
1234      * Plays the earcon using the specified queueing mode and parameters.
1235      * The earcon must already have been added with {@link #addEarcon(String, String)} or
1236      * {@link #addEarcon(String, String, int)}.
1237      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1238      * requests and then returns. The synthesis might not have finished (or even started!) at the
1239      * time when this method returns. In order to reliably detect errors during synthesis,
1240      * we recommend setting an utterance progress listener (see
1241      * {@link #setOnUtteranceProgressListener}) and using the
1242      * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1243      *
1244      * @param earcon The earcon that should be played
1245      * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1246      * @param params Parameters for the request. Can be null.
1247      *            Supported parameter names:
1248      *            {@link Engine#KEY_PARAM_STREAM}.
1249      *            Engine specific parameters may be passed in but the parameter keys
1250      *            must be prefixed by the name of the engine they are intended for. For example
1251      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1252      *            engine named "com.svox.pico" if it is being used.
1253      * @param utteranceId A unique identifier for this request.
1254      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playEarcon operation.
1255      */
1256     public int playEarcon(final String earcon, final int queueMode,
1257             final Bundle params, final String utteranceId) {
1258         return runAction((ITextToSpeechService service) -> {
1259             Uri earconUri = mEarcons.get(earcon);
1260             if (earconUri == null) {
1261                 return ERROR;
1262             }
1263             return service.playAudio(getCallerIdentity(), earconUri, queueMode,
1264                     getParams(params), utteranceId);
1265         }, ERROR, "playEarcon");
1266     }
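
    /*
     * Illustrative usage sketch, not part of the original class: an earcon has to be
     * registered before it can be played. The earcon name and file used here are
     * assumptions for the example.
     *
     *   tts.addEarcon("[ding]", new File(context.getFilesDir(), "ding.wav"));
     *   tts.playEarcon("[ding]", TextToSpeech.QUEUE_ADD, null, "ding-1");
     */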
1267 
1268     /**
1269      * Plays the earcon using the specified queueing mode and parameters.
1270      * The earcon must already have been added with {@link #addEarcon(String, String)} or
1271      * {@link #addEarcon(String, String, int)}.
1272      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1273      * requests and then returns. The synthesis might not have finished (or even started!) at the
1274      * time when this method returns. In order to reliably detect errors during synthesis,
1275      * we recommend setting an utterance progress listener (see
1276      * {@link #setOnUtteranceProgressListener}) and using the
1277      * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1278      *
1279      * @param earcon The earcon that should be played
1280      * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1281      * @param params Parameters for the request. Can be null.
1282      *            Supported parameter names:
1283      *            {@link Engine#KEY_PARAM_STREAM},
1284      *            {@link Engine#KEY_PARAM_UTTERANCE_ID}.
1285      *            Engine specific parameters may be passed in but the parameter keys
1286      *            must be prefixed by the name of the engine they are intended for. For example
1287      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1288      *            engine named "com.svox.pico" if it is being used.
1289      *
1290      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playEarcon operation.
1291      * @deprecated As of API level 21, replaced by
1292      *         {@link #playEarcon(String, int, Bundle, String)}.
1293      */
1294     @Deprecated
1295     public int playEarcon(final String earcon, final int queueMode,
1296             final HashMap<String, String> params) {
1297         return playEarcon(earcon, queueMode, convertParamsHashMaptoBundle(params),
1298                           params == null ? null : params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1299     }
1300 
1301     /**
1302      * Plays silence for the specified amount of time using the specified
1303      * queue mode.
1304      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1305      * requests and then returns. The synthesis might not have finished (or even started!) at the
1306      * time when this method returns. In order to reliably detect errors during synthesis,
1307      * we recommend setting an utterance progress listener (see
1308      * {@link #setOnUtteranceProgressListener}) and using the
1309      * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1310      *
1311      * @param durationInMs The duration of the silence, in milliseconds.
1312      * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1313      * @param utteranceId A unique identifier for this request.
1314      *
1315      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playSilentUtterance operation.
1316      */
1317     public int playSilentUtterance(final long durationInMs, final int queueMode,
1318             final String utteranceId) {
1319         return runAction((ITextToSpeechService service) -> {
1320             return service.playSilence(getCallerIdentity(), durationInMs,
1321                                         queueMode, utteranceId);
1322         }, ERROR, "playSilentUtterance");
1323     }
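
    /*
     * Illustrative usage sketch, not part of the original class: insert a one second
     * pause between two queued utterances (the utterance ids are assumptions).
     *
     *   tts.speak("First part", TextToSpeech.QUEUE_ADD, null, "part-1");
     *   tts.playSilentUtterance(1000, TextToSpeech.QUEUE_ADD, "pause-1");
     *   tts.speak("Second part", TextToSpeech.QUEUE_ADD, null, "part-2");
     */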
1324 
1325     /**
1326      * Plays silence for the specified amount of time using the specified
1327      * queue mode.
1328      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1329      * requests and then returns. The synthesis might not have finished (or even started!) at the
1330      * time when this method returns. In order to reliably detect errors during synthesis,
1331      * we recommend setting an utterance progress listener (see
1332      * {@link #setOnUtteranceProgressListener}) and using the
1333      * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1334      *
1335      * @param durationInMs The duration of the silence, in milliseconds.
1336      * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1337      * @param params Parameters for the request. Can be null.
1338      *            Supported parameter names:
1339      *            {@link Engine#KEY_PARAM_UTTERANCE_ID}.
1340      *            Engine specific parameters may be passed in but the parameter keys
1341      *            must be prefixed by the name of the engine they are intended for. For example
1342      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1343      *            engine named "com.svox.pico" if it is being used.
1344      *
1345      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playSilence operation.
1346      * @deprecated As of API level 21, replaced by
1347      *         {@link #playSilentUtterance(long, int, String)}.
1348      */
1349     @Deprecated
1350     public int playSilence(final long durationInMs, final int queueMode,
1351             final HashMap<String, String> params) {
1352         return playSilentUtterance(durationInMs, queueMode,
1353                            params == null ? null : params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1354     }
1355 
1356     /**
1357      * Queries the engine for the set of features it supports for a given locale.
1358      * Features can either be framework defined, e.g.
1359      * {@link TextToSpeech.Engine#KEY_FEATURE_NETWORK_SYNTHESIS} or engine specific.
1360      * Engine specific keys must be prefixed by the name of the engine they
1361      * are intended for. These keys can be used as parameters to
1362      * {@link TextToSpeech#speak(String, int, java.util.HashMap)} and
1363      * {@link TextToSpeech#synthesizeToFile(String, java.util.HashMap, String)}.
1364      *
1365      * Feature values are strings and must meet the restrictions described in their
1366      * documentation.
1367      *
1368      * @param locale The locale to query features for.
1369      * @return The set of features supported for the given locale, or {@code null} on error.
1370      * @deprecated As of API level 21, please use voices. In order to query features of the voice,
1371      * call {@link #getVoices()} to retrieve the list of available voices and
1372      * {@link Voice#getFeatures()} to retrieve the set of features.
1373      */
1374     @Deprecated
1375     public Set<String> getFeatures(final Locale locale) {
1376         return runAction((ITextToSpeechService service) -> {
1377             String[] features = null;
1378             try {
1379                 features = service.getFeaturesForLanguage(
1380                     locale.getISO3Language(), locale.getISO3Country(), locale.getVariant());
1381             } catch (MissingResourceException e) {
1382                 Log.w(TAG, "Couldn't retrieve 3 letter ISO 639-2/T language and/or ISO 3166 "
1383                         + "country code for locale: " + locale, e);
1384                 return null;
1385             }
1386 
1387             if (features != null) {
1388                 final Set<String> featureSet = new HashSet<String>();
1389                 Collections.addAll(featureSet, features);
1390                 return featureSet;
1391             }
1392             return null;
1393         }, null, "getFeatures");
1394     }
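
    /*
     * Illustrative sketch of the non-deprecated path, not part of the original class:
     * query features per voice rather than per locale. The locale filter is an
     * assumption for the example.
     *
     *   Set<Voice> voices = tts.getVoices();
     *   if (voices != null) {
     *       for (Voice v : voices) {
     *           if (Locale.US.equals(v.getLocale())) {
     *               Set<String> features = v.getFeatures();
     *               // Inspect features, e.g. for engine-specific capability keys.
     *           }
     *       }
     *   }
     */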
1395 
1396     /**
1397      * Checks whether the TTS engine is busy speaking. Note that a speech item is
1398      * considered complete once its audio data has been sent to the audio mixer, or
1399      * written to a file. There might be a finite lag between this point and when
1400      * the audio hardware completes playback.
1401      *
1402      * @return {@code true} if the TTS engine is speaking.
1403      */
1404     public boolean isSpeaking() {
1405         return runAction((ITextToSpeechService service) -> {
1406             return service.isSpeaking();
1407         }, false, "isSpeaking");
1408     }
1409 
1410     /**
1411      * Interrupts the current utterance (whether played or rendered to file) and discards other
1412      * utterances in the queue.
1413      *
1414      * @return {@link #ERROR} or {@link #SUCCESS}.
1415      */
1416     public int stop() {
1417         return runAction((ITextToSpeechService service) -> {
1418             return service.stop(getCallerIdentity());
1419         }, ERROR, "stop");
1420     }
1421 
1422     /**
1423      * Sets the speech rate.
1424      *
1425      * This has no effect on any pre-recorded speech.
1426      *
1427      * @param speechRate Speech rate. {@code 1.0} is the normal speech rate,
1428      *            lower values slow down the speech ({@code 0.5} is half the normal speech rate),
1429      *            greater values accelerate it ({@code 2.0} is twice the normal speech rate).
1430      *
1431      * @return {@link #ERROR} or {@link #SUCCESS}.
1432      */
1433     public int setSpeechRate(float speechRate) {
1434         if (speechRate > 0.0f) {
1435             int intRate = (int)(speechRate * 100);
1436             if (intRate > 0) {
1437                 synchronized (mStartLock) {
1438                     mParams.putInt(Engine.KEY_PARAM_RATE, intRate);
1439                 }
1440                 return SUCCESS;
1441             }
1442         }
1443         return ERROR;
1444     }
1445 
1446     /**
1447      * Sets the speech pitch for the TextToSpeech engine.
1448      *
1449      * This has no effect on any pre-recorded speech.
1450      *
1451      * @param pitch Speech pitch. {@code 1.0} is the normal pitch,
1452      *            lower values lower the tone of the synthesized voice,
1453      *            greater values increase it.
1454      *
1455      * @return {@link #ERROR} or {@link #SUCCESS}.
1456      */
1457     public int setPitch(float pitch) {
1458         if (pitch > 0.0f) {
1459             int intPitch = (int)(pitch * 100);
1460             if (intPitch > 0) {
1461                 synchronized (mStartLock) {
1462                     mParams.putInt(Engine.KEY_PARAM_PITCH, intPitch);
1463                 }
1464                 return SUCCESS;
1465             }
1466         }
1467         return ERROR;
1468     }
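
    /*
     * Illustrative usage sketch, not part of the original class: both setters take a
     * positive float where 1.0 is the engine default. As the code above shows, the
     * value is stored as an integer percentage, so values that round down to zero
     * percent return ERROR. The concrete values are assumptions for the example.
     *
     *   tts.setSpeechRate(1.5f); // 50% faster than the normal rate
     *   tts.setPitch(0.8f);      // slightly lower tone
     */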
1469 
1470     /**
1471      * Sets the audio attributes to be used when speaking text or playing
1472      * back a file.
1473      *
1474      * @param audioAttributes Valid AudioAttributes instance.
1475      *
1476      * @return {@link #ERROR} or {@link #SUCCESS}.
1477      */
1478     public int setAudioAttributes(AudioAttributes audioAttributes) {
1479         if (audioAttributes != null) {
1480             synchronized (mStartLock) {
1481                 mParams.putParcelable(Engine.KEY_PARAM_AUDIO_ATTRIBUTES,
1482                     audioAttributes);
1483             }
1484             return SUCCESS;
1485         }
1486         return ERROR;
1487     }
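
    /*
     * Illustrative usage sketch, not part of the original class: route TTS output as
     * accessibility speech. The specific usage and content type chosen here are
     * assumptions for the example.
     *
     *   AudioAttributes attrs = new AudioAttributes.Builder()
     *           .setUsage(AudioAttributes.USAGE_ASSISTANCE_ACCESSIBILITY)
     *           .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
     *           .build();
     *   tts.setAudioAttributes(attrs);
     */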
1488 
1489     /**
1490      * @return the engine currently in use by this TextToSpeech instance.
1491      * @hide
1492      */
1493     @UnsupportedAppUsage
1494     public String getCurrentEngine() {
1495         return mCurrentEngine;
1496     }
1497 
1498     /**
1499      * Returns a Locale instance describing the language currently being used as the default
1500      * Text-to-speech language.
1501      *
1502      * The Locale object returned by this method is NOT a valid one. It has the same form as
1503      * the one returned by {@link #getLanguage()}. Please refer to {@link #getLanguage()} for more information.
1504      *
1505      * @return language, country (if any) and variant (if any) used by the client stored in a
1506      *     Locale instance, or {@code null} on error.
1507      * @deprecated As of API level 21, use <code>getDefaultVoice().getLocale()</code> ({@link
1508      *   #getDefaultVoice()})
1509      */
1510     @Deprecated
1511     public Locale getDefaultLanguage() {
1512         return runAction((ITextToSpeechService service) -> {
1513             String[] defaultLanguage = service.getClientDefaultLanguage();
1514 
1515             return new Locale(defaultLanguage[0], defaultLanguage[1], defaultLanguage[2]);
1516         }, null, "getDefaultLanguage");
1517     }
1518 
1519     /**
1520      * Sets the text-to-speech language.
1521      * The TTS engine will try to use the closest match to the specified
1522      * language as represented by the Locale, but there is no guarantee that the exact same Locale
1523      * will be used. Use {@link #isLanguageAvailable(Locale)} to check the level of support
1524      * before choosing the language to use for the next utterances.
1525      *
1526      * This method sets the current voice to the default one for the given Locale;
1527      * {@link #getVoice()} can be used to retrieve it.
1528      *
1529      * @param loc The locale describing the language to be used.
1530      *
1531      * @return Code indicating the support status for the locale. See {@link #LANG_AVAILABLE},
1532      *         {@link #LANG_COUNTRY_AVAILABLE}, {@link #LANG_COUNTRY_VAR_AVAILABLE},
1533      *         {@link #LANG_MISSING_DATA} and {@link #LANG_NOT_SUPPORTED}.
1534      */
1535     public int setLanguage(final Locale loc) {
1536         return runAction((ITextToSpeechService service) -> {
1537             if (loc == null) {
1538                 return LANG_NOT_SUPPORTED;
1539             }
1540             String language = null, country = null;
1541             try {
1542                 language = loc.getISO3Language();
1543             } catch (MissingResourceException e) {
1544                 Log.w(TAG, "Couldn't retrieve ISO 639-2/T language code for locale: " + loc, e);
1545                 return LANG_NOT_SUPPORTED;
1546             }
1547 
1548             try {
1549                 country = loc.getISO3Country();
1550             } catch (MissingResourceException e) {
1551                 Log.w(TAG, "Couldn't retrieve ISO 3166 country code for locale: " + loc, e);
1552                 return LANG_NOT_SUPPORTED;
1553             }
1554 
1555             String variant = loc.getVariant();
1556 
1557             // As of API level 21, setLanguage is implemented using setVoice (which,
1558             // in the default implementation, will call loadLanguage on the service
1559             // interface).
1560 
1561             // Sanitize locale using isLanguageAvailable.
1562             int result = service.isLanguageAvailable(language, country, variant);
1563             if (result >= LANG_AVAILABLE) {
1564                 // Get the default voice for the locale.
1565                 String voiceName = service.getDefaultVoiceNameFor(language, country, variant);
1566                 if (TextUtils.isEmpty(voiceName)) {
1567                     Log.w(TAG, "Couldn't find the default voice for " + language + "-"
1568                             + country + "-" + variant);
1569                     return LANG_NOT_SUPPORTED;
1570                 }
1571 
1572                 // Load it.
1573                 if (service.loadVoice(getCallerIdentity(), voiceName) == TextToSpeech.ERROR) {
1574                     Log.w(TAG, "The service claimed " + language + "-" + country + "-"
1575                             + variant + " was available with voice name " + voiceName
1576                             + " but loadVoice returned ERROR");
1577                     return LANG_NOT_SUPPORTED;
1578                 }
1579 
1580                 // Set the language/country/variant of the voice, so #getLanguage will return
1581                 // the currently set voice locale when called.
1582                 Voice voice = getVoice(service, voiceName);
1583                 if (voice == null) {
1584                     Log.w(TAG, "getDefaultVoiceNameFor returned " + voiceName + " for locale "
1585                             + language + "-" + country + "-" + variant
1586                             + " but getVoice returns null");
1587                     return LANG_NOT_SUPPORTED;
1588                 }
1589                 String voiceLanguage = "";
1590                 try {
1591                     voiceLanguage = voice.getLocale().getISO3Language();
1592                 } catch (MissingResourceException e) {
1593                     Log.w(TAG, "Couldn't retrieve ISO 639-2/T language code for locale: "
1594                             + voice.getLocale(), e);
1595                 }
1596 
1597                 String voiceCountry = "";
1598                 try {
1599                     voiceCountry = voice.getLocale().getISO3Country();
1600                 } catch (MissingResourceException e) {
1601                     Log.w(TAG, "Couldn't retrieve ISO 3166 country code for locale: "
1602                             + voice.getLocale(), e);
1603                 }
1604                 mParams.putString(Engine.KEY_PARAM_VOICE_NAME, voiceName);
1605                 mParams.putString(Engine.KEY_PARAM_LANGUAGE, voiceLanguage);
1606                 mParams.putString(Engine.KEY_PARAM_COUNTRY, voiceCountry);
1607                 mParams.putString(Engine.KEY_PARAM_VARIANT, voice.getLocale().getVariant());
1608             }
1609             return result;
1610         }, LANG_NOT_SUPPORTED, "setLanguage");
1611     }
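
    /*
     * Illustrative usage sketch, not part of the original class: probe availability
     * before switching languages and fall back when voice data is missing. The locale
     * choice is an assumption for the example.
     *
     *   int availability = tts.isLanguageAvailable(Locale.FRANCE);
     *   if (availability == TextToSpeech.LANG_MISSING_DATA
     *           || availability == TextToSpeech.LANG_NOT_SUPPORTED) {
     *       // Keep the current language, or prompt the user to install voice data.
     *   } else {
     *       tts.setLanguage(Locale.FRANCE);
     *   }
     */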
1612 
1613     /**
1614      * Returns a Locale instance describing the language currently being used for synthesis
1615      * requests sent to the TextToSpeech engine.
1616      *
1617      * In Android 4.2 and before (API <= 17) this function returns the language that is currently
1618      * being used by the TTS engine. That is the last language set by this or any other
1619      * client by a {@link TextToSpeech#setLanguage} call to the same engine.
1620      *
1621      * In Android versions after 4.2 this function returns the language that is currently being
1622      * used for the synthesis requests sent from this client. That is the last language set
1623      * by a {@link TextToSpeech#setLanguage} call on this instance.
1624      *
1625      * If a voice is set (by {@link #setVoice(Voice)}), getLanguage will return the language of
1626      * the currently set voice.
1627      *
1628      * Please note that the Locale object returned by this method is NOT a valid Locale object. Its
1629      * language field contains a three-letter ISO 639-2/T code (where a proper Locale would use
1630      * a two-letter ISO 639-1 code), and the country field contains a three-letter ISO 3166 country
1631      * code (where a proper Locale would use a two-letter ISO 3166-1 code).
1632      *
1633      * @return language, country (if any) and variant (if any) used by the client stored in a
1634      *     Locale instance, or {@code null} on error.
1635      *
1636      * @deprecated As of API level 21, please use <code>getVoice().getLocale()</code>
1637      * ({@link #getVoice()}).
1638      */
1639     @Deprecated
1640     public Locale getLanguage() {
1641         return runAction((ITextToSpeechService service) -> {
1642             /* No service call, but we're accessing mParams, hence need for
1643                wrapping it as an Action instance */
1644             String lang = mParams.getString(Engine.KEY_PARAM_LANGUAGE, "");
1645             String country = mParams.getString(Engine.KEY_PARAM_COUNTRY, "");
1646             String variant = mParams.getString(Engine.KEY_PARAM_VARIANT, "");
1647             return new Locale(lang, country, variant);
1648         }, null, "getLanguage");
1649     }
1650 
1651     /**
1652      * Query the engine about the set of available languages.
1653      */
1654     public Set<Locale> getAvailableLanguages() {
1655         return runAction((ITextToSpeechService service) -> {
1656             List<Voice> voices = service.getVoices();
1657             if (voices == null) {
1658                 return new HashSet<Locale>();
1659             }
1660             HashSet<Locale> locales = new HashSet<Locale>();
1661             for (Voice voice : voices) {
1662                 locales.add(voice.getLocale());
1663             }
1664             return locales;
1665         }, null, "getAvailableLanguages");
1666     }
1667 
1668     /**
1669      * Query the engine about the set of available voices.
1670      *
1671      * Each TTS Engine can expose multiple voices for each locale, each with a different set of
1672      * features.
1673      *
1674      * @see #setVoice(Voice)
1675      * @see Voice
1676      */
1677     public Set<Voice> getVoices() {
1678         return runAction((ITextToSpeechService service) -> {
1679             List<Voice> voices = service.getVoices();
1680             return (voices != null) ? new HashSet<Voice>(voices) : new HashSet<Voice>();
1681         }, null, "getVoices");
1682     }
1683 
1684     /**
1685      * Sets the text-to-speech voice.
1686      *
1687      * @param voice One of the objects returned by {@link #getVoices()}.
1688      *
1689      * @return {@link #ERROR} or {@link #SUCCESS}.
1690      *
1691      * @see #getVoices
1692      * @see Voice
1693      */
1694     public int setVoice(final Voice voice) {
1695         return runAction((ITextToSpeechService service) -> {
1696             int result = service.loadVoice(getCallerIdentity(), voice.getName());
1697             if (result == SUCCESS) {
1698                 mParams.putString(Engine.KEY_PARAM_VOICE_NAME, voice.getName());
1699 
1700                 // Set the language/country/variant, so #getLanguage will return the voice
1701                 // locale when called.
1702                 String language = "";
1703                 try {
1704                     language = voice.getLocale().getISO3Language();
1705                 } catch (MissingResourceException e) {
1706                     Log.w(TAG, "Couldn't retrieve ISO 639-2/T language code for locale: "
1707                             + voice.getLocale(), e);
1708                 }
1709 
1710                 String country = "";
1711                 try {
1712                     country = voice.getLocale().getISO3Country();
1713                 } catch (MissingResourceException e) {
1714                     Log.w(TAG, "Couldn't retrieve ISO 3166 country code for locale: "
1715                             + voice.getLocale(), e);
1716                 }
1717                 mParams.putString(Engine.KEY_PARAM_LANGUAGE, language);
1718                 mParams.putString(Engine.KEY_PARAM_COUNTRY, country);
1719                 mParams.putString(Engine.KEY_PARAM_VARIANT, voice.getLocale().getVariant());
1720             }
1721             return result;
1722         }, LANG_NOT_SUPPORTED, "setVoice");
1723     }
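
    /*
     * Illustrative usage sketch, not part of the original class: pick the first voice
     * for a locale that works without a network connection and make it current. The
     * selection criteria are assumptions for the example.
     *
     *   Set<Voice> voices = tts.getVoices();
     *   if (voices != null) {
     *       for (Voice v : voices) {
     *           if (Locale.US.equals(v.getLocale()) && !v.isNetworkConnectionRequired()) {
     *               tts.setVoice(v);
     *               break;
     *           }
     *       }
     *   }
     */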
1724 
1725     /**
1726      * Returns a Voice instance describing the voice currently being used for synthesis
1727      * requests sent to the TextToSpeech engine.
1728      *
1729      * @return Voice instance used by the client, or {@code null} if not set or on error.
1730      *
1731      * @see #getVoices
1732      * @see #setVoice
1733      * @see Voice
1734      */
1735     public Voice getVoice() {
1736         return runAction((ITextToSpeechService service) -> {
1737             String voiceName = mParams.getString(Engine.KEY_PARAM_VOICE_NAME, "");
1738             if (TextUtils.isEmpty(voiceName)) {
1739                 return null;
1740             }
1741             return getVoice(service, voiceName);
1742         }, null, "getVoice");
1743     }
1744 
1745 
1746     /**
1747      * Returns a Voice instance of the voice with the given voice name.
1748      *
1749      * @return Voice instance with the given voice name, or {@code null} if not found or on error.
1750      *
1751      * @see Voice
1752      */
1753     private Voice getVoice(ITextToSpeechService service, String voiceName) throws RemoteException {
1754         List<Voice> voices = service.getVoices();
1755         if (voices == null) {
1756             Log.w(TAG, "getVoices returned null");
1757             return null;
1758         }
1759         for (Voice voice : voices) {
1760             if (voice.getName().equals(voiceName)) {
1761                 return voice;
1762             }
1763         }
1764         Log.w(TAG, "Could not find voice " + voiceName + " in voice list");
1765         return null;
1766     }
1767 
1768     /**
1769      * Returns a Voice instance that's the default voice for the default Text-to-speech language.
1770      * @return The default voice instance for the default language, or {@code null} if not set or
1771      *     on error.
1772      */
1773     public Voice getDefaultVoice() {
1774         return runAction((ITextToSpeechService service) -> {
1775 
1776             String[] defaultLanguage = service.getClientDefaultLanguage();
1777 
1778             if (defaultLanguage == null || defaultLanguage.length == 0) {
1779                 Log.e(TAG, "service.getClientDefaultLanguage() returned empty array");
1780                 return null;
1781             }
1782             String language = defaultLanguage[0];
1783             String country = (defaultLanguage.length > 1) ? defaultLanguage[1] : "";
1784             String variant = (defaultLanguage.length > 2) ? defaultLanguage[2] : "";
1785 
1786             // Sanitize the locale using isLanguageAvailable.
1787             int result = service.isLanguageAvailable(language, country, variant);
1788             if (result < LANG_AVAILABLE) {
1789                 // The default language is not supported.
1790                 return null;
1791             }
1792 
1793             // Get the default voice name
1794             String voiceName = service.getDefaultVoiceNameFor(language, country, variant);
1795             if (TextUtils.isEmpty(voiceName)) {
1796                 return null;
1797             }
1798 
1799             // Find it
1800             List<Voice> voices = service.getVoices();
1801             if (voices == null) {
1802                 return null;
1803             }
1804             for (Voice voice : voices) {
1805                 if (voice.getName().equals(voiceName)) {
1806                     return voice;
1807                 }
1808             }
1809             return null;
1810         }, null, "getDefaultVoice");
1811     }
1812 
1813 
1814 
1815     /**
1816      * Checks if the specified language as represented by the Locale is available and supported.
1817      *
1818      * @param loc The Locale describing the language to be used.
1819      *
1820      * @return Code indicating the support status for the locale. See {@link #LANG_AVAILABLE},
1821      *         {@link #LANG_COUNTRY_AVAILABLE}, {@link #LANG_COUNTRY_VAR_AVAILABLE},
1822      *         {@link #LANG_MISSING_DATA} and {@link #LANG_NOT_SUPPORTED}.
1823      */
1824     public int isLanguageAvailable(final Locale loc) {
1825         return runAction((ITextToSpeechService service) -> {
1826             String language = null, country = null;
1827 
1828             try {
1829                 language = loc.getISO3Language();
1830             } catch (MissingResourceException e) {
1831                 Log.w(TAG, "Couldn't retrieve ISO 639-2/T language code for locale: " + loc, e);
1832                 return LANG_NOT_SUPPORTED;
1833             }
1834 
1835             try {
1836                 country = loc.getISO3Country();
1837             } catch (MissingResourceException e) {
1838                 Log.w(TAG, "Couldn't retrieve ISO 3166 country code for locale: " + loc, e);
1839                 return LANG_NOT_SUPPORTED;
1840             }
1841 
1842             return service.isLanguageAvailable(language, country, loc.getVariant());
1843         }, LANG_NOT_SUPPORTED, "isLanguageAvailable");
1844     }
1845 
1846     /**
1847      * Synthesizes the given text to a ParcelFileDescriptor using the specified parameters.
1848      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1849      * requests and then returns. The synthesis might not have finished (or even started!) at the
1850      * time when this method returns. In order to reliably detect errors during synthesis,
1851      * we recommend setting an utterance progress listener (see
1852      * {@link #setOnUtteranceProgressListener}).
1853      *
1854      * @param text The text that should be synthesized. No longer than
1855      *            {@link #getMaxSpeechInputLength()} characters.
1856      * @param params Parameters for the request.
1857      *            Engine specific parameters may be passed in but the parameter keys
1858      *            must be prefixed by the name of the engine they are intended for. For example
1859      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the engine
1860      *            named "com.svox.pico" if it is being used.
1861      * @param fileDescriptor ParcelFileDescriptor to write the generated audio data to.
1862      * @param utteranceId A unique identifier for this request.
1863      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the synthesizeToFile operation.
1864      */
1865     public int synthesizeToFile(@NonNull final CharSequence text, @NonNull final Bundle params,
1866             @NonNull final ParcelFileDescriptor fileDescriptor, @NonNull final String utteranceId) {
1867         return runAction((ITextToSpeechService service) -> {
1868             return service.synthesizeToFileDescriptor(getCallerIdentity(), text,
1869                     fileDescriptor, getParams(params), utteranceId);
1870         }, ERROR, "synthesizeToFile");
1871     }
1872 
1873     /**
1874      * Synthesizes the given text to a file using the specified parameters.
1875      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1876      * requests and then returns. The synthesis might not have finished (or even started!) at the
1877      * time when this method returns. In order to reliably detect errors during synthesis,
1878      * we recommend setting an utterance progress listener (see
1879      * {@link #setOnUtteranceProgressListener}).
1880      *
1881      * @param text The text that should be synthesized. No longer than
1882      *            {@link #getMaxSpeechInputLength()} characters.
1883      * @param params Parameters for the request. Cannot be null.
1884      *            Engine specific parameters may be passed in but the parameter keys
1885      *            must be prefixed by the name of the engine they are intended for. For example
1886      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1887      *            engine named "com.svox.pico" if it is being used.
1888      * @param file File to write the generated audio data to.
1889      * @param utteranceId A unique identifier for this request.
1890      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the synthesizeToFile operation.
1891      */
1892     public int synthesizeToFile(final CharSequence text, final Bundle params,
1893             final File file, final String utteranceId) {
1894         if (file.exists() && !file.canWrite()) {
1895             Log.e(TAG, "Can't write to " + file);
1896             return ERROR;
1897         }
1898         try (
1899             ParcelFileDescriptor fileDescriptor = ParcelFileDescriptor.open(file,
1900                 ParcelFileDescriptor.MODE_WRITE_ONLY
1901                 | ParcelFileDescriptor.MODE_CREATE
1902                 | ParcelFileDescriptor.MODE_TRUNCATE);
1903         ) {
1904             int returnValue = synthesizeToFile(text, params, fileDescriptor, utteranceId);
1905             fileDescriptor.close();
1906             return returnValue;
1907         } catch (FileNotFoundException e) {
1908             Log.e(TAG, "Opening file " + file + " failed", e);
1909             return ERROR;
1910         } catch (IOException e) {
1911             Log.e(TAG, "Closing file " + file + " failed", e);
1912             return ERROR;
1913         }
1914     }
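
    /*
     * Illustrative usage sketch, not part of the original class: render an utterance
     * into a file in the app cache directory and wait for the progress listener's
     * onDone callback before using it. The file name and utterance id are assumptions.
     *
     *   File out = new File(context.getCacheDir(), "greeting.wav");
     *   int queued = tts.synthesizeToFile("Welcome back", new Bundle(), out, "greeting");
     *   // onDone("greeting") on the UtteranceProgressListener signals that the file is complete.
     */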
1915 
1916     /**
1917      * Synthesizes the given text to a file using the specified parameters.
1918      * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1919      * requests and then returns. The synthesis might not have finished (or even started!) at the
1920      * time when this method returns. In order to reliably detect errors during synthesis,
1921      * we recommend setting an utterance progress listener (see
1922      * {@link #setOnUtteranceProgressListener}) and using the
1923      * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1924      *
1925      * @param text The text that should be synthesized. No longer than
1926      *            {@link #getMaxSpeechInputLength()} characters.
1927      * @param params Parameters for the request. Cannot be null.
1928      *            Supported parameter names:
1929      *            {@link Engine#KEY_PARAM_UTTERANCE_ID}.
1930      *            Engine specific parameters may be passed in but the parameter keys
1931      *            must be prefixed by the name of the engine they are intended for. For example
1932      *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1933      *            engine named "com.svox.pico" if it is being used.
1934      * @param filename Absolute file path to write the generated audio data to. It should be
1935      *            something like "/sdcard/myappsounds/mysound.wav".
1936      *
1937      * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the synthesizeToFile operation.
1938      * @deprecated As of API level 21, replaced by
1939      *         {@link #synthesizeToFile(CharSequence, Bundle, File, String)}.
1940      */
1941     @Deprecated
1942     public int synthesizeToFile(final String text, final HashMap<String, String> params,
1943             final String filename) {
1944         return synthesizeToFile(text, convertParamsHashMaptoBundle(params),
1945                 new File(filename), params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1946     }
1947 
1948     private Bundle convertParamsHashMaptoBundle(HashMap<String, String> params) {
1949         if (params != null && !params.isEmpty()) {
1950             Bundle bundle = new Bundle();
1951             copyIntParam(bundle, params, Engine.KEY_PARAM_STREAM);
1952             copyIntParam(bundle, params, Engine.KEY_PARAM_SESSION_ID);
1953             copyStringParam(bundle, params, Engine.KEY_PARAM_UTTERANCE_ID);
1954             copyFloatParam(bundle, params, Engine.KEY_PARAM_VOLUME);
1955             copyFloatParam(bundle, params, Engine.KEY_PARAM_PAN);
1956 
1957             // Copy feature strings defined by the framework.
1958             copyStringParam(bundle, params, Engine.KEY_FEATURE_NETWORK_SYNTHESIS);
1959             copyStringParam(bundle, params, Engine.KEY_FEATURE_EMBEDDED_SYNTHESIS);
1960             copyIntParam(bundle, params, Engine.KEY_FEATURE_NETWORK_TIMEOUT_MS);
1961             copyIntParam(bundle, params, Engine.KEY_FEATURE_NETWORK_RETRIES_COUNT);
1962 
1963             // Copy over all parameters that start with the name of the
1964             // engine that we are currently connected to. The engine is
1965             // free to interpret them as it chooses.
1966             if (!TextUtils.isEmpty(mCurrentEngine)) {
1967                 for (Map.Entry<String, String> entry : params.entrySet()) {
1968                     final String key = entry.getKey();
1969                     if (key != null && key.startsWith(mCurrentEngine)) {
1970                         bundle.putString(key, entry.getValue());
1971                     }
1972                 }
1973             }
1974 
1975             return bundle;
1976         }
1977         return null;
1978     }
1979 
1980     private Bundle getParams(Bundle params) {
1981         if (params != null && !params.isEmpty()) {
1982             Bundle bundle = new Bundle(mParams);
1983             bundle.putAll(params);
1984 
1985             verifyIntegerBundleParam(bundle, Engine.KEY_PARAM_STREAM);
1986             verifyIntegerBundleParam(bundle, Engine.KEY_PARAM_SESSION_ID);
1987             verifyStringBundleParam(bundle, Engine.KEY_PARAM_UTTERANCE_ID);
1988             verifyFloatBundleParam(bundle, Engine.KEY_PARAM_VOLUME);
1989             verifyFloatBundleParam(bundle, Engine.KEY_PARAM_PAN);
1990 
1991             // Copy feature strings defined by the framework.
1992             verifyBooleanBundleParam(bundle, Engine.KEY_FEATURE_NETWORK_SYNTHESIS);
1993             verifyBooleanBundleParam(bundle, Engine.KEY_FEATURE_EMBEDDED_SYNTHESIS);
1994             verifyIntegerBundleParam(bundle, Engine.KEY_FEATURE_NETWORK_TIMEOUT_MS);
1995             verifyIntegerBundleParam(bundle, Engine.KEY_FEATURE_NETWORK_RETRIES_COUNT);
1996 
1997             return bundle;
1998         } else {
1999             return mParams;
2000         }
2001     }
2002 
2003     private static boolean verifyIntegerBundleParam(Bundle bundle, String key) {
2004         if (bundle.containsKey(key)) {
2005             if (!(bundle.get(key) instanceof Integer ||
2006                     bundle.get(key) instanceof Long)) {
2007                 bundle.remove(key);
2008                 Log.w(TAG, "Synthesis request parameter " + key + " contains a value "
2009                         + "with an invalid type. Should be an Integer or a Long");
2010                 return false;
2011             }
2012         }
2013         return true;
2014     }
2015 
2016     private static boolean verifyStringBundleParam(Bundle bundle, String key) {
2017         if (bundle.containsKey(key)) {
2018             if (!(bundle.get(key) instanceof String)) {
2019                 bundle.remove(key);
2020                 Log.w(TAG, "Synthesis request parameter " + key + " contains a value "
2021                         + "with an invalid type. Should be a String");
2022                 return false;
2023             }
2024         }
2025         return true;
2026     }
2027 
2028     private static boolean verifyBooleanBundleParam(Bundle bundle, String key) {
2029         if (bundle.containsKey(key)) {
2030             if (!(bundle.get(key) instanceof Boolean ||
2031                     bundle.get(key) instanceof String)) {
2032                 bundle.remove(key);
2033                 Log.w(TAG, "Synthesis request parameter " + key + " contains a value "
2034                         + "with an invalid type. Should be a Boolean or String");
2035                 return false;
2036             }
2037         }
2038         return true;
2039     }
2040 
2041 
2042     private static boolean verifyFloatBundleParam(Bundle bundle, String key) {
2043         if (bundle.containsKey(key)) {
2044             if (!(bundle.get(key) instanceof Float ||
2045                     bundle.get(key) instanceof Double)) {
2046                 bundle.remove(key);
2047                 Log.w(TAG, "Synthesis request parameter " + key + " contains a value "
2048                         + "with an invalid type. Should be a Float or a Double");
2049                 return false;
2050             }
2051         }
2052         return true;
2053     }
2054 
2055     private void copyStringParam(Bundle bundle, HashMap<String, String> params, String key) {
2056         String value = params.get(key);
2057         if (value != null) {
2058             bundle.putString(key, value);
2059         }
2060     }
2061 
2062     private void copyIntParam(Bundle bundle, HashMap<String, String> params, String key) {
2063         String valueString = params.get(key);
2064         if (!TextUtils.isEmpty(valueString)) {
2065             try {
2066                 int value = Integer.parseInt(valueString);
2067                 bundle.putInt(key, value);
2068             } catch (NumberFormatException ex) {
2069                 // don't set the value in the bundle
2070             }
2071         }
2072     }
2073 
2074     private void copyFloatParam(Bundle bundle, HashMap<String, String> params, String key) {
2075         String valueString = params.get(key);
2076         if (!TextUtils.isEmpty(valueString)) {
2077             try {
2078                 float value = Float.parseFloat(valueString);
2079                 bundle.putFloat(key, value);
2080             } catch (NumberFormatException ex) {
2081                 // don't set the value in the bundle
2082             }
2083         }
2084     }
2085 
2086     /**
2087      * Sets the listener that will be notified when synthesis of an utterance completes.
2088      *
2089      * @param listener The listener to use.
2090      *
2091      * @return {@link #ERROR} or {@link #SUCCESS}.
2092      *
2093      * @deprecated Use {@link #setOnUtteranceProgressListener(UtteranceProgressListener)}
2094      *        instead.
2095      */
2096     @Deprecated
2097     public int setOnUtteranceCompletedListener(final OnUtteranceCompletedListener listener) {
2098         mUtteranceProgressListener = UtteranceProgressListener.from(listener);
2099         return TextToSpeech.SUCCESS;
2100     }
2101 
2102     /**
2103      * Sets the listener that will be notified of various events related to the
2104      * synthesis of a given utterance.
2105      *
2106      * See {@link UtteranceProgressListener} and
2107      * {@link TextToSpeech.Engine#KEY_PARAM_UTTERANCE_ID}.
2108      *
2109      * @param listener the listener to use.
2110      * @return {@link #ERROR} or {@link #SUCCESS}
2111      */
2112     public int setOnUtteranceProgressListener(UtteranceProgressListener listener) {
2113         mUtteranceProgressListener = listener;
2114         return TextToSpeech.SUCCESS;
2115     }
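
    /*
     * Illustrative usage sketch, not part of the original class: the callbacks are not
     * guaranteed to arrive on the main thread, so post to it before touching UI. The
     * single-argument onError is deprecated but still abstract, so it must be overridden.
     *
     *   tts.setOnUtteranceProgressListener(new UtteranceProgressListener() {
     *       @Override public void onStart(String utteranceId) { }
     *       @Override public void onDone(String utteranceId) { }
     *       @Override public void onError(String utteranceId) { }
     *   });
     */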
2116 
2117     /**
2118      * Sets the TTS engine to use.
2119      *
2120      * @deprecated This doesn't inform callers when the TTS engine has been
2121      *        initialized. {@link #TextToSpeech(Context, OnInitListener, String)}
2122      *        can be used with the appropriate engine name. Also, there is no
2123      *        guarantee that the engine specified will be loaded. If it isn't
2124      *        installed or disabled, the user / system wide defaults will apply.
2125      *
2126      * @param enginePackageName The package name for the synthesis engine (e.g. "com.svox.pico")
2127      *
2128      * @return {@link #ERROR} or {@link #SUCCESS}.
2129      */
2130     @Deprecated
2131     public int setEngineByPackageName(String enginePackageName) {
2132         mRequestedEngine = enginePackageName;
2133         return initTts();
2134     }
2135 
2136     /**
2137      * Gets the package name of the default speech synthesis engine.
2138      *
2139      * @return Package name of the TTS engine that the user has chosen
2140      *        as their default.
2141      */
2142     public String getDefaultEngine() {
2143         return mEnginesHelper.getDefaultEngine();
2144     }
2145 
2146     /**
2147      * Checks whether the user's settings should override settings requested
2148      * by the calling application. As of the Ice Cream Sandwich release,
2149      * user settings never forcibly override the app's settings.
2150      */
2151     @Deprecated
2152     public boolean areDefaultsEnforced() {
2153         return false;
2154     }
2155 
2156     /**
2157      * Gets a list of all installed TTS engines.
2158      *
2159      * @return A list of engine info objects. The list can be empty, but never {@code null}.
2160      */
2161     public List<EngineInfo> getEngines() {
2162         return mEnginesHelper.getEngines();
2163     }
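
    /*
     * Illustrative usage sketch, not part of the original class: list the installed
     * engines, e.g. to let the user pick one to pass to the
     * TextToSpeech(Context, OnInitListener, String) constructor. The log tag is an
     * assumption for the example.
     *
     *   for (TextToSpeech.EngineInfo info : tts.getEngines()) {
     *       Log.d("TtsDemo", info.label + " (" + info.name + ")");
     *   }
     */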
2164 
2165     private abstract class Connection implements ServiceConnection {
2166         private ITextToSpeechService mService;
2167 
2168         private SetupConnectionAsyncTask mOnSetupConnectionAsyncTask;
2169 
2170         private boolean mEstablished;
2171 
2172         abstract boolean connect(String engine);
2173 
2174         abstract void disconnect();
2175 
2176         private final ITextToSpeechCallback.Stub mCallback =
2177                 new ITextToSpeechCallback.Stub() {
2178                     public void onStop(String utteranceId, boolean isStarted)
2179                             throws RemoteException {
2180                         UtteranceProgressListener listener = mUtteranceProgressListener;
2181                         if (listener != null) {
2182                             listener.onStop(utteranceId, isStarted);
2183                         }
2184                     }
2185 
2186                     @Override
2187                     public void onSuccess(String utteranceId) {
2188                         UtteranceProgressListener listener = mUtteranceProgressListener;
2189                         if (listener != null) {
2190                             listener.onDone(utteranceId);
2191                         }
2192                     }
2193 
2194                     @Override
2195                     public void onError(String utteranceId, int errorCode) {
2196                         UtteranceProgressListener listener = mUtteranceProgressListener;
2197                         if (listener != null) {
2198                             listener.onError(utteranceId, errorCode);
2199                         }
2200                     }
2201 
2202                     @Override
2203                     public void onStart(String utteranceId) {
2204                         UtteranceProgressListener listener = mUtteranceProgressListener;
2205                         if (listener != null) {
2206                             listener.onStart(utteranceId);
2207                         }
2208                     }
2209 
2210                     @Override
2211                     public void onBeginSynthesis(
2212                             String utteranceId,
2213                             int sampleRateInHz,
2214                             int audioFormat,
2215                             int channelCount) {
2216                         UtteranceProgressListener listener = mUtteranceProgressListener;
2217                         if (listener != null) {
2218                             listener.onBeginSynthesis(
2219                                     utteranceId, sampleRateInHz, audioFormat, channelCount);
2220                         }
2221                     }
2222 
2223                     @Override
2224                     public void onAudioAvailable(String utteranceId, byte[] audio) {
2225                         UtteranceProgressListener listener = mUtteranceProgressListener;
2226                         if (listener != null) {
2227                             listener.onAudioAvailable(utteranceId, audio);
2228                         }
2229                     }
2230 
2231                     @Override
2232                     public void onRangeStart(String utteranceId, int start, int end, int frame) {
2233                         UtteranceProgressListener listener = mUtteranceProgressListener;
2234                         if (listener != null) {
2235                             listener.onRangeStart(utteranceId, start, end, frame);
2236                         }
2237                     }
2238                 };
2239 
2240         private class SetupConnectionAsyncTask extends AsyncTask<Void, Void, Integer> {
2241 
2242             @Override
2243             protected Integer doInBackground(Void... params) {
2244                 synchronized(mStartLock) {
2245                     if (isCancelled()) {
2246                         return null;
2247                     }
2248 
2249                     try {
2250                         mService.setCallback(getCallerIdentity(), mCallback);
2251 
2252                         if (mParams.getString(Engine.KEY_PARAM_LANGUAGE) == null) {
2253                             String[] defaultLanguage = mService.getClientDefaultLanguage();
2254                             mParams.putString(Engine.KEY_PARAM_LANGUAGE, defaultLanguage[0]);
2255                             mParams.putString(Engine.KEY_PARAM_COUNTRY, defaultLanguage[1]);
2256                             mParams.putString(Engine.KEY_PARAM_VARIANT, defaultLanguage[2]);
2257 
2258                             // Get the default voice for the locale.
2259                             String defaultVoiceName = mService.getDefaultVoiceNameFor(
2260                                 defaultLanguage[0], defaultLanguage[1], defaultLanguage[2]);
2261                             mParams.putString(Engine.KEY_PARAM_VOICE_NAME, defaultVoiceName);
2262                         }
2263 
2264                         Log.i(TAG, "Setting up the connection to TTS engine...");
2265                         return SUCCESS;
2266                     } catch (RemoteException re) {
2267                         Log.e(TAG, "Error connecting to service, setCallback() failed");
2268                         return ERROR;
2269                     }
2270                 }
2271             }
2272 
2273             @Override
2274             protected void onPostExecute(Integer result) {
2275                 synchronized(mStartLock) {
2276                     if (mOnSetupConnectionAsyncTask == this) {
2277                         mOnSetupConnectionAsyncTask = null;
2278                     }
2279                     mEstablished = true;
2280                     dispatchOnInit(result);
2281                 }
2282             }
2283         }
2284 
2285         @Override
2286         public void onServiceConnected(ComponentName componentName, IBinder service) {
2287             synchronized(mStartLock) {
2288                 mConnectingServiceConnection = null;
2289 
2290                 Log.i(TAG, "Connected to TTS engine");
2291 
2292                 if (mOnSetupConnectionAsyncTask != null) {
2293                     mOnSetupConnectionAsyncTask.cancel(false);
2294                 }
2295 
2296                 mService = ITextToSpeechService.Stub.asInterface(service);
2297                 mServiceConnection = Connection.this;
2298 
2299                 mEstablished = false;
2300                 mOnSetupConnectionAsyncTask = new SetupConnectionAsyncTask();
2301                 mOnSetupConnectionAsyncTask.execute();
2302             }
2303         }
2304 
2305         public IBinder getCallerIdentity() {
2306             return mCallback;
2307         }
2308 
2309         /**
2310          * Clear connection-related fields and cancel mOnSetupConnectionAsyncTask if set.
2311          *
2312          * @return true if an in-progress mOnSetupConnectionAsyncTask was cancelled.
2313          */
2314         protected boolean clearServiceConnection() {
2315             synchronized(mStartLock) {
2316                 boolean result = false;
2317                 if (mOnSetupConnectionAsyncTask != null) {
2318                     result = mOnSetupConnectionAsyncTask.cancel(false);
2319                     mOnSetupConnectionAsyncTask = null;
2320                 }
2321 
2322                 mService = null;
2323                 // If this is the active connection, clear it
2324                 if (mServiceConnection == this) {
2325                     mServiceConnection = null;
2326                 }
2327                 return result;
2328             }
2329         }
2330 
2331         @Override
2332         public void onServiceDisconnected(ComponentName componentName) {
2333             Log.i(TAG, "Disconnected from TTS engine");
2334             if (clearServiceConnection()) {
2335                 /* We need to protect against a rare case where the engine dies just after a
2336                  * successful connection and onServiceDisconnected runs before
2337                  * SetupConnectionAsyncTask.onPostExecute. clearServiceConnection() cancels the
2338                  * task, so onPostExecute never runs and would never call dispatchOnInit;
2339                  * in that case we dispatch ERROR here instead.
2340                  */
2341                 dispatchOnInit(ERROR);
2342             }
2343         }
2344 
2345         public boolean isEstablished() {
2346             return mService != null && mEstablished;
2347         }
2348 
2349         public <R> R runAction(Action<R> action, R errorResult, String method,
2350                 boolean reconnect, boolean onlyEstablishedConnection) {
2351             synchronized (mStartLock) {
2352                 try {
2353                     if (mService == null) {
2354                         Log.w(TAG, method + " failed: not connected to TTS engine");
2355                         return errorResult;
2356                     }
2357                     if (onlyEstablishedConnection && !isEstablished()) {
2358                         Log.w(TAG, method + " failed: TTS engine connection not fully set up");
2359                         return errorResult;
2360                     }
2361                     return action.run(mService);
2362                 } catch (RemoteException ex) {
2363                     Log.e(TAG, method + " failed", ex);
2364                     if (reconnect) {
2365                         disconnect();
2366                         initTts();
2367                     }
2368                     return errorResult;
2369                 }
2370             }
2371         }
2372     }
2373 
2374     // Currently all the clients are routed through the System connection. Direct connection
2375     // is left for debugging, testing and benchmarking purposes.
2376     // TODO(b/179599071): Remove direct connection once system one is fully tested.
2377     private class DirectConnection extends Connection {
2378         @Override
2379         boolean connect(String engine) {
2380             Intent intent = new Intent(Engine.INTENT_ACTION_TTS_SERVICE);
2381             intent.setPackage(engine);
2382             return mContext.bindService(intent, this, Context.BIND_AUTO_CREATE);
2383         }
2384 
2385         @Override
2386         void disconnect() {
2387             mContext.unbindService(this);
2388             clearServiceConnection();
2389         }
2390     }
2391 
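    // Unlike DirectConnection, SystemConnection does not bind to the engine itself: it asks the
    // TextToSpeechManagerService in the system server to create a session bound to the chosen
    // engine, and the service binder delivered through onConnected() is fed into the same
    // onServiceConnected()/onServiceDisconnected() path that a direct binding would use.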
2392     private class SystemConnection extends Connection {
2393 
2394         @Nullable
2395         private volatile ITextToSpeechSession mSession;
2396 
2397         @Override
2398         boolean connect(String engine) {
2399             IBinder binder = ServiceManager.getService(Context.TEXT_TO_SPEECH_MANAGER_SERVICE);
2400 
2401             ITextToSpeechManager manager = ITextToSpeechManager.Stub.asInterface(binder);
2402 
2403             if (manager == null) {
2404                 Log.e(TAG, "System service is not available!");
2405                 return false;
2406             }
2407 
2408             if (DEBUG) {
2409                 Log.d(TAG, "Connecting to engine: " + engine);
2410             }
2411 
2412             try {
2413                 manager.createSession(engine, new ITextToSpeechSessionCallback.Stub() {
2414                     @Override
2415                     public void onConnected(ITextToSpeechSession session, IBinder serviceBinder) {
2416                         mSession = session;
2417                         onServiceConnected(
2418                                 /* componentName= */ null,
2419                                 serviceBinder);
2420                     }
2421 
2422                     @Override
2423                     public void onDisconnected() {
2424                         onServiceDisconnected(/* componentName= */ null);
2425                     }
2426 
2427                     @Override
2428                     public void onError(String errorInfo) {
2429                         Log.w(TAG, "System TTS connection error: " + errorInfo);
2430                         // There is an error connecting to the engine - notify the listener.
2431                         dispatchOnInit(ERROR);
2432                     }
2433                 });
2434 
2435                 return true;
2436             } catch (RemoteException ex) {
2437                 Log.e(TAG, "Error communicating with the System Server: ", ex);
2438                 throw ex.rethrowFromSystemServer();
2439             }
2440         }
2441 
2442         @Override
2443         void disconnect() {
2444             ITextToSpeechSession session = mSession;
2445 
2446             if (session != null) {
2447                 try {
2448                     session.disconnect();
2449                 } catch (RemoteException ex) {
2450                     Log.w(TAG, "Error disconnecting session", ex);
2451                 }
2452 
2453                 clearServiceConnection();
2454             }
2455         }
2456     }
2457 
2458     private interface Action<R> {
2459         R run(ITextToSpeechService service) throws RemoteException;
2460     }
2461 
2462     /**
2463      * Information about an installed text-to-speech engine.
2464      *
2465      * @see TextToSpeech#getEngines
2466      */
2467     public static class EngineInfo {
2468         /**
2469          * Engine package name.
2470          */
2471         public String name;
2472         /**
2473          * Localized label for the engine.
2474          */
2475         public String label;
2476         /**
2477          * Icon for the engine.
2478          */
2479         public int icon;
2480         /**
2481          * Whether this engine is a part of the system
2482          * image.
2483          *
2484          * @hide
2485          */
2486         public boolean system;
2487         /**
2488          * The priority the engine declares for the intent filter
2489          * {@code android.intent.action.TTS_SERVICE}.
2490          *
2491          * @hide
2492          */
2493         public int priority;
2494 
2495         @Override
2496         public String toString() {
2497             return "EngineInfo{name=" + name + "}";
2498         }
2499 
2500     }
2501 
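    // Illustrative sketch (not part of the original class): enumerating the installed engines
    // reported by getEngines(), each described by an EngineInfo. The method name
    // "logInstalledEnginesSketch" is an assumption added purely for illustration.
    private void logInstalledEnginesSketch() {
        for (EngineInfo engine : getEngines()) {
            // "name" is the engine's package name, "label" its localized display name.
            Log.d(TAG, "Installed TTS engine: " + engine.label + " (" + engine.name + ")");
        }
    }
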
2502     /**
2503      * Limit of length of input string passed to speak and synthesizeToFile.
2504      * Returns the limit on the length of the input string passed to speak and synthesizeToFile.
2505      * @see #speak
2506      * @see #synthesizeToFile
2507      */
2508     public static int getMaxSpeechInputLength() {
2509         return 4000;
2510     }
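
    // Illustrative sketch (not part of the original class): guarding a speak() request against
    // the input-length limit above, since text longer than getMaxSpeechInputLength() is not
    // accepted. Truncation is used here for brevity; splitting the text into several utterances
    // is usually the better option. The method name "speakTruncatedSketch" is an assumption
    // added purely for illustration.
    private int speakTruncatedSketch(CharSequence text, String utteranceId) {
        if (text != null && text.length() > getMaxSpeechInputLength()) {
            text = text.subSequence(0, getMaxSpeechInputLength());
        }
        return speak(text, QUEUE_FLUSH, null, utteranceId);
    }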
2511 }
2512