1 /*
2  * Copyright 2020 Google LLC
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     https://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 // Generated by the protocol buffer compiler.  DO NOT EDIT!
17 // source: google/cloud/speech/v1/cloud_speech.proto
18 
19 package com.google.cloud.speech.v1;
20 
21 /**
22  *
23  *
24  * <pre>
25  * Provides information to the recognizer that specifies how to process the
26  * request.
27  * </pre>
28  *
29  * Protobuf type {@code google.cloud.speech.v1.RecognitionConfig}
30  */
31 public final class RecognitionConfig extends com.google.protobuf.GeneratedMessageV3
32     implements
33     // @@protoc_insertion_point(message_implements:google.cloud.speech.v1.RecognitionConfig)
34     RecognitionConfigOrBuilder {
35   private static final long serialVersionUID = 0L;
36   // Use RecognitionConfig.newBuilder() to construct.
37   private RecognitionConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
38     super(builder);
39   }
40 
41   private RecognitionConfig() {
42     encoding_ = 0;
43     languageCode_ = "";
44     alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
45     speechContexts_ = java.util.Collections.emptyList();
46     model_ = "";
47   }
48 
49   @java.lang.Override
50   @SuppressWarnings({"unused"})
51   protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
52     return new RecognitionConfig();
53   }
54 
55   @java.lang.Override
56   public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
57     return this.unknownFields;
58   }
59 
60   public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
61     return com.google.cloud.speech.v1.SpeechProto
62         .internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor;
63   }
64 
65   @java.lang.Override
66   protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
67       internalGetFieldAccessorTable() {
68     return com.google.cloud.speech.v1.SpeechProto
69         .internal_static_google_cloud_speech_v1_RecognitionConfig_fieldAccessorTable
70         .ensureFieldAccessorsInitialized(
71             com.google.cloud.speech.v1.RecognitionConfig.class,
72             com.google.cloud.speech.v1.RecognitionConfig.Builder.class);
73   }
74 
75   /**
76    *
77    *
78    * <pre>
79    * The encoding of the audio data sent in the request.
80    * All encodings support only 1 channel (mono) audio, unless the
81    * `audio_channel_count` and `enable_separate_recognition_per_channel` fields
82    * are set.
83    * For best results, the audio source should be captured and transmitted using
84    * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
85    * recognition can be reduced if lossy codecs are used to capture or transmit
86    * audio, particularly if background noise is present. Lossy codecs include
87    * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`,
88    * and `WEBM_OPUS`.
89    * The `FLAC` and `WAV` audio file formats include a header that describes the
90    * included audio content. You can request recognition for `WAV` files that
91    * contain either `LINEAR16` or `MULAW` encoded audio.
92    * If you send `FLAC` or `WAV` audio file format in
93    * your request, you do not need to specify an `AudioEncoding`; the audio
94    * encoding format is determined from the file header. If you specify
94    * an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
96    * encoding configuration must match the encoding described in the audio
97    * header; otherwise the request returns an
98    * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
99    * code.
100    * </pre>
101    *
102    * Protobuf enum {@code google.cloud.speech.v1.RecognitionConfig.AudioEncoding}
103    */
104   public enum AudioEncoding implements com.google.protobuf.ProtocolMessageEnum {
105     /**
106      *
107      *
108      * <pre>
109      * Not specified.
110      * </pre>
111      *
112      * <code>ENCODING_UNSPECIFIED = 0;</code>
113      */
114     ENCODING_UNSPECIFIED(0),
115     /**
116      *
117      *
118      * <pre>
119      * Uncompressed 16-bit signed little-endian samples (Linear PCM).
120      * </pre>
121      *
122      * <code>LINEAR16 = 1;</code>
123      */
124     LINEAR16(1),
125     /**
126      *
127      *
128      * <pre>
129      * `FLAC` (Free Lossless Audio
130      * Codec) is the recommended encoding because it is
131      * lossless--therefore recognition is not compromised--and
132      * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
133      * encoding supports 16-bit and 24-bit samples, however, not all fields in
134      * `STREAMINFO` are supported.
135      * </pre>
136      *
137      * <code>FLAC = 2;</code>
138      */
139     FLAC(2),
140     /**
141      *
142      *
143      * <pre>
144      * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
145      * </pre>
146      *
147      * <code>MULAW = 3;</code>
148      */
149     MULAW(3),
150     /**
151      *
152      *
153      * <pre>
154      * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
155      * </pre>
156      *
157      * <code>AMR = 4;</code>
158      */
159     AMR(4),
160     /**
161      *
162      *
163      * <pre>
164      * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
165      * </pre>
166      *
167      * <code>AMR_WB = 5;</code>
168      */
169     AMR_WB(5),
170     /**
171      *
172      *
173      * <pre>
174      * Opus encoded audio frames in Ogg container
175      * ([OggOpus](https://wiki.xiph.org/OggOpus)).
176      * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
177      * </pre>
178      *
179      * <code>OGG_OPUS = 6;</code>
180      */
181     OGG_OPUS(6),
182     /**
183      *
184      *
185      * <pre>
186      * Although the use of lossy encodings is not recommended, if a very low
187      * bitrate encoding is required, `OGG_OPUS` is highly preferred over
188      * Speex encoding. The [Speex](https://speex.org/)  encoding supported by
189      * Cloud Speech API has a header byte in each block, as in MIME type
190      * `audio/x-speex-with-header-byte`.
191      * It is a variant of the RTP Speex encoding defined in
192      * [RFC 5574](https://tools.ietf.org/html/rfc5574).
193      * The stream is a sequence of blocks, one block per RTP packet. Each block
194      * starts with a byte containing the length of the block, in bytes, followed
195      * by one or more frames of Speex data, padded to an integral number of
196      * bytes (octets) as specified in RFC 5574. In other words, each RTP header
197      * is replaced with a single byte containing the block length. Only Speex
198      * wideband is supported. `sample_rate_hertz` must be 16000.
199      * </pre>
200      *
201      * <code>SPEEX_WITH_HEADER_BYTE = 7;</code>
202      */
203     SPEEX_WITH_HEADER_BYTE(7),
204     /**
205      *
206      *
207      * <pre>
208      * Opus encoded audio frames in WebM container
209      * ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
210      * one of 8000, 12000, 16000, 24000, or 48000.
211      * </pre>
212      *
213      * <code>WEBM_OPUS = 9;</code>
214      */
215     WEBM_OPUS(9),
216     UNRECOGNIZED(-1),
217     ;
218 
219     /**
220      *
221      *
222      * <pre>
223      * Not specified.
224      * </pre>
225      *
226      * <code>ENCODING_UNSPECIFIED = 0;</code>
227      */
228     public static final int ENCODING_UNSPECIFIED_VALUE = 0;
229     /**
230      *
231      *
232      * <pre>
233      * Uncompressed 16-bit signed little-endian samples (Linear PCM).
234      * </pre>
235      *
236      * <code>LINEAR16 = 1;</code>
237      */
238     public static final int LINEAR16_VALUE = 1;
239     /**
240      *
241      *
242      * <pre>
243      * `FLAC` (Free Lossless Audio
244      * Codec) is the recommended encoding because it is
245      * lossless--therefore recognition is not compromised--and
246      * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
247      * encoding supports 16-bit and 24-bit samples, however, not all fields in
248      * `STREAMINFO` are supported.
249      * </pre>
250      *
251      * <code>FLAC = 2;</code>
252      */
253     public static final int FLAC_VALUE = 2;
254     /**
255      *
256      *
257      * <pre>
258      * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
259      * </pre>
260      *
261      * <code>MULAW = 3;</code>
262      */
263     public static final int MULAW_VALUE = 3;
264     /**
265      *
266      *
267      * <pre>
268      * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
269      * </pre>
270      *
271      * <code>AMR = 4;</code>
272      */
273     public static final int AMR_VALUE = 4;
274     /**
275      *
276      *
277      * <pre>
278      * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
279      * </pre>
280      *
281      * <code>AMR_WB = 5;</code>
282      */
283     public static final int AMR_WB_VALUE = 5;
284     /**
285      *
286      *
287      * <pre>
288      * Opus encoded audio frames in Ogg container
289      * ([OggOpus](https://wiki.xiph.org/OggOpus)).
290      * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
291      * </pre>
292      *
293      * <code>OGG_OPUS = 6;</code>
294      */
295     public static final int OGG_OPUS_VALUE = 6;
296     /**
297      *
298      *
299      * <pre>
300      * Although the use of lossy encodings is not recommended, if a very low
301      * bitrate encoding is required, `OGG_OPUS` is highly preferred over
302      * Speex encoding. The [Speex](https://speex.org/)  encoding supported by
303      * Cloud Speech API has a header byte in each block, as in MIME type
304      * `audio/x-speex-with-header-byte`.
305      * It is a variant of the RTP Speex encoding defined in
306      * [RFC 5574](https://tools.ietf.org/html/rfc5574).
307      * The stream is a sequence of blocks, one block per RTP packet. Each block
308      * starts with a byte containing the length of the block, in bytes, followed
309      * by one or more frames of Speex data, padded to an integral number of
310      * bytes (octets) as specified in RFC 5574. In other words, each RTP header
311      * is replaced with a single byte containing the block length. Only Speex
312      * wideband is supported. `sample_rate_hertz` must be 16000.
313      * </pre>
314      *
315      * <code>SPEEX_WITH_HEADER_BYTE = 7;</code>
316      */
317     public static final int SPEEX_WITH_HEADER_BYTE_VALUE = 7;
318     /**
319      *
320      *
321      * <pre>
322      * Opus encoded audio frames in WebM container
323      * ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
324      * one of 8000, 12000, 16000, 24000, or 48000.
325      * </pre>
326      *
327      * <code>WEBM_OPUS = 9;</code>
328      */
329     public static final int WEBM_OPUS_VALUE = 9;
330 
331     public final int getNumber() {
332       if (this == UNRECOGNIZED) {
333         throw new java.lang.IllegalArgumentException(
334             "Can't get the number of an unknown enum value.");
335       }
336       return value;
337     }
338 
339     /**
340      * @param value The numeric wire value of the corresponding enum entry.
341      * @return The enum associated with the given numeric wire value.
342      * @deprecated Use {@link #forNumber(int)} instead.
343      */
344     @java.lang.Deprecated
345     public static AudioEncoding valueOf(int value) {
346       return forNumber(value);
347     }
348 
349     /**
350      * @param value The numeric wire value of the corresponding enum entry.
351      * @return The enum associated with the given numeric wire value.
352      */
353     public static AudioEncoding forNumber(int value) {
354       switch (value) {
355         case 0:
356           return ENCODING_UNSPECIFIED;
357         case 1:
358           return LINEAR16;
359         case 2:
360           return FLAC;
361         case 3:
362           return MULAW;
363         case 4:
364           return AMR;
365         case 5:
366           return AMR_WB;
367         case 6:
368           return OGG_OPUS;
369         case 7:
370           return SPEEX_WITH_HEADER_BYTE;
371         case 9:
372           return WEBM_OPUS;
373         default:
374           return null;
375       }
376     }
377 
378     public static com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalGetValueMap() {
379       return internalValueMap;
380     }
381 
382     private static final com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalValueMap =
383         new com.google.protobuf.Internal.EnumLiteMap<AudioEncoding>() {
384           public AudioEncoding findValueByNumber(int number) {
385             return AudioEncoding.forNumber(number);
386           }
387         };
388 
389     public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
390       if (this == UNRECOGNIZED) {
391         throw new java.lang.IllegalStateException(
392             "Can't get the descriptor of an unrecognized enum value.");
393       }
394       return getDescriptor().getValues().get(ordinal());
395     }
396 
397     public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
398       return getDescriptor();
399     }
400 
401     public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
402       return com.google.cloud.speech.v1.RecognitionConfig.getDescriptor().getEnumTypes().get(0);
403     }
404 
405     private static final AudioEncoding[] VALUES = values();
406 
407     public static AudioEncoding valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
408       if (desc.getType() != getDescriptor()) {
409         throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
410       }
411       if (desc.getIndex() == -1) {
412         return UNRECOGNIZED;
413       }
414       return VALUES[desc.getIndex()];
415     }
416 
417     private final int value;
418 
419     private AudioEncoding(int value) {
420       this.value = value;
421     }
422 
423     // @@protoc_insertion_point(enum_scope:google.cloud.speech.v1.RecognitionConfig.AudioEncoding)
424   }
425 
426   public static final int ENCODING_FIELD_NUMBER = 1;
427   private int encoding_ = 0;
428   /**
429    *
430    *
431    * <pre>
432    * Encoding of audio data sent in all `RecognitionAudio` messages.
433    * This field is optional for `FLAC` and `WAV` audio files and required
434    * for all other audio formats. For details, see
435    * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
436    * </pre>
437    *
438    * <code>.google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;</code>
439    *
440    * @return The enum numeric value on the wire for encoding.
441    */
442   @java.lang.Override
443   public int getEncodingValue() {
444     return encoding_;
445   }
446   /**
447    *
448    *
449    * <pre>
450    * Encoding of audio data sent in all `RecognitionAudio` messages.
451    * This field is optional for `FLAC` and `WAV` audio files and required
452    * for all other audio formats. For details, see
453    * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
454    * </pre>
455    *
456    * <code>.google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;</code>
457    *
458    * @return The encoding.
459    */
460   @java.lang.Override
461   public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() {
462     com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding result =
463         com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.forNumber(encoding_);
464     return result == null
465         ? com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
466         : result;
467   }
468 
469   public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2;
470   private int sampleRateHertz_ = 0;
471   /**
472    *
473    *
474    * <pre>
475    * Sample rate in Hertz of the audio data sent in all
476    * `RecognitionAudio` messages. Valid values are: 8000-48000.
477    * 16000 is optimal. For best results, set the sampling rate of the audio
478    * source to 16000 Hz. If that's not possible, use the native sample rate of
479    * the audio source (instead of re-sampling).
480    * This field is optional for FLAC and WAV audio files, but is
481    * required for all other audio formats. For details, see
482    * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
483    * </pre>
484    *
485    * <code>int32 sample_rate_hertz = 2;</code>
486    *
487    * @return The sampleRateHertz.
488    */
489   @java.lang.Override
490   public int getSampleRateHertz() {
491     return sampleRateHertz_;
492   }
493 
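  // Illustrative usage sketch (not part of the generated code): how a caller might
  // combine the `encoding` and `sample_rate_hertz` fields documented above when
  // building a config with RecognitionConfig.newBuilder(). The LINEAR16 / 16000 Hz
  // values are example choices, not requirements.
  //
  //   RecognitionConfig config =
  //       RecognitionConfig.newBuilder()
  //           .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
  //           .setSampleRateHertz(16000)
  //           .setLanguageCode("en-US")
  //           .build();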
494   public static final int AUDIO_CHANNEL_COUNT_FIELD_NUMBER = 7;
495   private int audioChannelCount_ = 0;
496   /**
497    *
498    *
499    * <pre>
500    * The number of channels in the input audio data.
501    * ONLY set this for MULTI-CHANNEL recognition.
502    * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
503    * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
504    * If `0` or omitted, defaults to one channel (mono).
505    * Note: We only recognize the first channel by default.
506    * To perform independent recognition on each channel set
507    * `enable_separate_recognition_per_channel` to 'true'.
508    * </pre>
509    *
510    * <code>int32 audio_channel_count = 7;</code>
511    *
512    * @return The audioChannelCount.
513    */
514   @java.lang.Override
515   public int getAudioChannelCount() {
516     return audioChannelCount_;
517   }
518 
519   public static final int ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER = 12;
520   private boolean enableSeparateRecognitionPerChannel_ = false;
521   /**
522    *
523    *
524    * <pre>
525    * This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
526    * to get each channel recognized separately. The recognition result will
527    * contain a `channel_tag` field to state which channel that result belongs
528    * to. If this is not true, we will only recognize the first channel. The
529    * request is billed cumulatively for all channels recognized:
530    * `audio_channel_count` multiplied by the length of the audio.
531    * </pre>
532    *
533    * <code>bool enable_separate_recognition_per_channel = 12;</code>
534    *
535    * @return The enableSeparateRecognitionPerChannel.
536    */
537   @java.lang.Override
538   public boolean getEnableSeparateRecognitionPerChannel() {
539     return enableSeparateRecognitionPerChannel_;
540   }
541 
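  // Illustrative usage sketch (not part of the generated code): enabling per-channel
  // recognition for stereo audio. Both `audio_channel_count` and
  // `enable_separate_recognition_per_channel` are set, as the field comments above
  // require; the request assumes the audio actually contains two channels.
  //
  //   RecognitionConfig stereoConfig =
  //       RecognitionConfig.newBuilder()
  //           .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
  //           .setSampleRateHertz(16000)
  //           .setLanguageCode("en-US")
  //           .setAudioChannelCount(2)
  //           .setEnableSeparateRecognitionPerChannel(true)
  //           .build();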
542   public static final int LANGUAGE_CODE_FIELD_NUMBER = 3;
543 
544   @SuppressWarnings("serial")
545   private volatile java.lang.Object languageCode_ = "";
546   /**
547    *
548    *
549    * <pre>
550    * Required. The language of the supplied audio as a
551    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
552    * Example: "en-US".
553    * See [Language
554    * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
555    * of the currently supported language codes.
556    * </pre>
557    *
558    * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
559    *
560    * @return The languageCode.
561    */
562   @java.lang.Override
563   public java.lang.String getLanguageCode() {
564     java.lang.Object ref = languageCode_;
565     if (ref instanceof java.lang.String) {
566       return (java.lang.String) ref;
567     } else {
568       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
569       java.lang.String s = bs.toStringUtf8();
570       languageCode_ = s;
571       return s;
572     }
573   }
574   /**
575    *
576    *
577    * <pre>
578    * Required. The language of the supplied audio as a
579    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
580    * Example: "en-US".
581    * See [Language
582    * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
583    * of the currently supported language codes.
584    * </pre>
585    *
586    * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
587    *
588    * @return The bytes for languageCode.
589    */
590   @java.lang.Override
591   public com.google.protobuf.ByteString getLanguageCodeBytes() {
592     java.lang.Object ref = languageCode_;
593     if (ref instanceof java.lang.String) {
594       com.google.protobuf.ByteString b =
595           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
596       languageCode_ = b;
597       return b;
598     } else {
599       return (com.google.protobuf.ByteString) ref;
600     }
601   }
602 
603   public static final int ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER = 18;
604 
605   @SuppressWarnings("serial")
606   private com.google.protobuf.LazyStringList alternativeLanguageCodes_;
607   /**
608    *
609    *
610    * <pre>
611    * A list of up to 3 additional
612    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
613    * listing possible alternative languages of the supplied audio.
614    * See [Language
615    * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
616    * of the currently supported language codes. If alternative languages are
617    * listed, recognition result will contain recognition in the most likely
618    * language detected including the main language_code. The recognition result
619    * will include the language tag of the language detected in the audio. Note:
620    * This feature is only supported for Voice Command and Voice Search use cases
621    * and performance may vary for other use cases (e.g., phone call
622    * transcription).
623    * </pre>
624    *
625    * <code>repeated string alternative_language_codes = 18;</code>
626    *
627    * @return A list containing the alternativeLanguageCodes.
628    */
629   public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() {
630     return alternativeLanguageCodes_;
631   }
632   /**
633    *
634    *
635    * <pre>
636    * A list of up to 3 additional
637    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
638    * listing possible alternative languages of the supplied audio.
639    * See [Language
640    * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
641    * of the currently supported language codes. If alternative languages are
642    * listed, recognition result will contain recognition in the most likely
643    * language detected including the main language_code. The recognition result
644    * will include the language tag of the language detected in the audio. Note:
645    * This feature is only supported for Voice Command and Voice Search use cases
646    * and performance may vary for other use cases (e.g., phone call
647    * transcription).
648    * </pre>
649    *
650    * <code>repeated string alternative_language_codes = 18;</code>
651    *
652    * @return The count of alternativeLanguageCodes.
653    */
654   public int getAlternativeLanguageCodesCount() {
655     return alternativeLanguageCodes_.size();
656   }
657   /**
658    *
659    *
660    * <pre>
661    * A list of up to 3 additional
662    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
663    * listing possible alternative languages of the supplied audio.
664    * See [Language
665    * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
666    * of the currently supported language codes. If alternative languages are
667    * listed, recognition result will contain recognition in the most likely
668    * language detected including the main language_code. The recognition result
669    * will include the language tag of the language detected in the audio. Note:
670    * This feature is only supported for Voice Command and Voice Search use cases
671    * and performance may vary for other use cases (e.g., phone call
672    * transcription).
673    * </pre>
674    *
675    * <code>repeated string alternative_language_codes = 18;</code>
676    *
677    * @param index The index of the element to return.
678    * @return The alternativeLanguageCodes at the given index.
679    */
680   public java.lang.String getAlternativeLanguageCodes(int index) {
681     return alternativeLanguageCodes_.get(index);
682   }
683   /**
684    *
685    *
686    * <pre>
687    * A list of up to 3 additional
688    * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
689    * listing possible alternative languages of the supplied audio.
690    * See [Language
691    * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
692    * of the currently supported language codes. If alternative languages are
693    * listed, recognition result will contain recognition in the most likely
694    * language detected including the main language_code. The recognition result
695    * will include the language tag of the language detected in the audio. Note:
696    * This feature is only supported for Voice Command and Voice Search use cases
697    * and performance may vary for other use cases (e.g., phone call
698    * transcription).
699    * </pre>
700    *
701    * <code>repeated string alternative_language_codes = 18;</code>
702    *
703    * @param index The index of the value to return.
704    * @return The bytes of the alternativeLanguageCodes at the given index.
705    */
706   public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) {
707     return alternativeLanguageCodes_.getByteString(index);
708   }
709 
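  // Illustrative usage sketch (not part of the generated code): supplying a primary
  // language plus alternative language tags, as described in the field comments
  // above. The specific BCP-47 tags are examples only.
  //
  //   RecognitionConfig multiLangConfig =
  //       RecognitionConfig.newBuilder()
  //           .setLanguageCode("en-US")
  //           .addAlternativeLanguageCodes("es-US")
  //           .addAlternativeLanguageCodes("fr-CA")
  //           .build();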
710   public static final int MAX_ALTERNATIVES_FIELD_NUMBER = 4;
711   private int maxAlternatives_ = 0;
712   /**
713    *
714    *
715    * <pre>
716    * Maximum number of recognition hypotheses to be returned.
717    * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
718    * within each `SpeechRecognitionResult`.
719    * The server may return fewer than `max_alternatives`.
720    * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
721    * one. If omitted, will return a maximum of one.
722    * </pre>
723    *
724    * <code>int32 max_alternatives = 4;</code>
725    *
726    * @return The maxAlternatives.
727    */
728   @java.lang.Override
729   public int getMaxAlternatives() {
730     return maxAlternatives_;
731   }
732 
733   public static final int PROFANITY_FILTER_FIELD_NUMBER = 5;
734   private boolean profanityFilter_ = false;
735   /**
736    *
737    *
738    * <pre>
739    * If set to `true`, the server will attempt to filter out
740    * profanities, replacing all but the initial character in each filtered word
741    * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
742    * won't be filtered out.
743    * </pre>
744    *
745    * <code>bool profanity_filter = 5;</code>
746    *
747    * @return The profanityFilter.
748    */
749   @java.lang.Override
750   public boolean getProfanityFilter() {
751     return profanityFilter_;
752   }
753 
754   public static final int ADAPTATION_FIELD_NUMBER = 20;
755   private com.google.cloud.speech.v1.SpeechAdaptation adaptation_;
756   /**
757    *
758    *
759    * <pre>
760    * Speech adaptation configuration improves the accuracy of speech
761    * recognition. For more information, see the [speech
762    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
763    * documentation.
764    * When speech adaptation is set it supersedes the `speech_contexts` field.
765    * </pre>
766    *
767    * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
768    *
769    * @return Whether the adaptation field is set.
770    */
771   @java.lang.Override
772   public boolean hasAdaptation() {
773     return adaptation_ != null;
774   }
775   /**
776    *
777    *
778    * <pre>
779    * Speech adaptation configuration improves the accuracy of speech
780    * recognition. For more information, see the [speech
781    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
782    * documentation.
783    * When speech adaptation is set it supersedes the `speech_contexts` field.
784    * </pre>
785    *
786    * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
787    *
788    * @return The adaptation.
789    */
790   @java.lang.Override
791   public com.google.cloud.speech.v1.SpeechAdaptation getAdaptation() {
792     return adaptation_ == null
793         ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()
794         : adaptation_;
795   }
796   /**
797    *
798    *
799    * <pre>
800    * Speech adaptation configuration improves the accuracy of speech
801    * recognition. For more information, see the [speech
802    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
803    * documentation.
804    * When speech adaptation is set it supersedes the `speech_contexts` field.
805    * </pre>
806    *
807    * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
808    */
809   @java.lang.Override
810   public com.google.cloud.speech.v1.SpeechAdaptationOrBuilder getAdaptationOrBuilder() {
811     return adaptation_ == null
812         ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()
813         : adaptation_;
814   }
815 
816   public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 6;
817 
818   @SuppressWarnings("serial")
819   private java.util.List<com.google.cloud.speech.v1.SpeechContext> speechContexts_;
820   /**
821    *
822    *
823    * <pre>
824    * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
825    * A means to provide context to assist the speech recognition. For more
826    * information, see
827    * [speech
828    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
829    * </pre>
830    *
831    * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
832    */
833   @java.lang.Override
834   public java.util.List<com.google.cloud.speech.v1.SpeechContext> getSpeechContextsList() {
835     return speechContexts_;
836   }
837   /**
838    *
839    *
840    * <pre>
841    * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
842    * A means to provide context to assist the speech recognition. For more
843    * information, see
844    * [speech
845    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
846    * </pre>
847    *
848    * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
849    */
850   @java.lang.Override
851   public java.util.List<? extends com.google.cloud.speech.v1.SpeechContextOrBuilder>
852       getSpeechContextsOrBuilderList() {
853     return speechContexts_;
854   }
855   /**
856    *
857    *
858    * <pre>
859    * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
860    * A means to provide context to assist the speech recognition. For more
861    * information, see
862    * [speech
863    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
864    * </pre>
865    *
866    * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
867    */
868   @java.lang.Override
869   public int getSpeechContextsCount() {
870     return speechContexts_.size();
871   }
872   /**
873    *
874    *
875    * <pre>
876    * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
877    * A means to provide context to assist the speech recognition. For more
878    * information, see
879    * [speech
880    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
881    * </pre>
882    *
883    * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
884    */
885   @java.lang.Override
886   public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) {
887     return speechContexts_.get(index);
888   }
889   /**
890    *
891    *
892    * <pre>
893    * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
894    * A means to provide context to assist the speech recognition. For more
895    * information, see
896    * [speech
897    * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
898    * </pre>
899    *
900    * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
901    */
902   @java.lang.Override
903   public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuilder(int index) {
904     return speechContexts_.get(index);
905   }
906 
907   public static final int ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER = 8;
908   private boolean enableWordTimeOffsets_ = false;
909   /**
910    *
911    *
912    * <pre>
913    * If `true`, the top result includes a list of words and
914    * the start and end time offsets (timestamps) for those words. If
915    * `false`, no word-level time offset information is returned. The default is
916    * `false`.
917    * </pre>
918    *
919    * <code>bool enable_word_time_offsets = 8;</code>
920    *
921    * @return The enableWordTimeOffsets.
922    */
923   @java.lang.Override
924   public boolean getEnableWordTimeOffsets() {
925     return enableWordTimeOffsets_;
926   }
927 
928   public static final int ENABLE_WORD_CONFIDENCE_FIELD_NUMBER = 15;
929   private boolean enableWordConfidence_ = false;
930   /**
931    *
932    *
933    * <pre>
934    * If `true`, the top result includes a list of words and the
935    * confidence for those words. If `false`, no word-level confidence
936    * information is returned. The default is `false`.
937    * </pre>
938    *
939    * <code>bool enable_word_confidence = 15;</code>
940    *
941    * @return The enableWordConfidence.
942    */
943   @java.lang.Override
944   public boolean getEnableWordConfidence() {
945     return enableWordConfidence_;
946   }
947 
948   public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 11;
949   private boolean enableAutomaticPunctuation_ = false;
950   /**
951    *
952    *
953    * <pre>
954    * If 'true', adds punctuation to recognition result hypotheses.
955    * This feature is only available in select languages. Setting this for
956    * requests in other languages has no effect at all.
957    * The default 'false' value does not add punctuation to result hypotheses.
958    * </pre>
959    *
960    * <code>bool enable_automatic_punctuation = 11;</code>
961    *
962    * @return The enableAutomaticPunctuation.
963    */
964   @java.lang.Override
965   public boolean getEnableAutomaticPunctuation() {
966     return enableAutomaticPunctuation_;
967   }
968 
969   public static final int ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER = 22;
970   private com.google.protobuf.BoolValue enableSpokenPunctuation_;
971   /**
972    *
973    *
974    * <pre>
975    * The spoken punctuation behavior for the call
976    * If not set, uses default behavior based on model of choice
977    * e.g. command_and_search will enable spoken punctuation by default
978    * If 'true', replaces spoken punctuation with the corresponding symbols in
979    * the request. For example, "how are you question mark" becomes "how are
980    * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
981    * for support. If 'false', spoken punctuation is not replaced.
982    * </pre>
983    *
984    * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
985    *
986    * @return Whether the enableSpokenPunctuation field is set.
987    */
988   @java.lang.Override
989   public boolean hasEnableSpokenPunctuation() {
990     return enableSpokenPunctuation_ != null;
991   }
992   /**
993    *
994    *
995    * <pre>
996    * The spoken punctuation behavior for the call
997    * If not set, uses default behavior based on model of choice
998    * e.g. command_and_search will enable spoken punctuation by default
999    * If 'true', replaces spoken punctuation with the corresponding symbols in
1000    * the request. For example, "how are you question mark" becomes "how are
1001    * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
1002    * for support. If 'false', spoken punctuation is not replaced.
1003    * </pre>
1004    *
1005    * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
1006    *
1007    * @return The enableSpokenPunctuation.
1008    */
1009   @java.lang.Override
1010   public com.google.protobuf.BoolValue getEnableSpokenPunctuation() {
1011     return enableSpokenPunctuation_ == null
1012         ? com.google.protobuf.BoolValue.getDefaultInstance()
1013         : enableSpokenPunctuation_;
1014   }
1015   /**
1016    *
1017    *
1018    * <pre>
1019    * The spoken punctuation behavior for the call
1020    * If not set, uses default behavior based on model of choice
1021    * e.g. command_and_search will enable spoken punctuation by default
1022    * If 'true', replaces spoken punctuation with the corresponding symbols in
1023    * the request. For example, "how are you question mark" becomes "how are
1024    * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
1025    * for support. If 'false', spoken punctuation is not replaced.
1026    * </pre>
1027    *
1028    * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
1029    */
1030   @java.lang.Override
1031   public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() {
1032     return enableSpokenPunctuation_ == null
1033         ? com.google.protobuf.BoolValue.getDefaultInstance()
1034         : enableSpokenPunctuation_;
1035   }
1036 
1037   public static final int ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER = 23;
1038   private com.google.protobuf.BoolValue enableSpokenEmojis_;
1039   /**
1040    *
1041    *
1042    * <pre>
1043    * The spoken emoji behavior for the call
1044    * If not set, uses default behavior based on model of choice
1045    * If 'true', adds spoken emoji formatting for the request. This will replace
1046    * spoken emojis with the corresponding Unicode symbols in the final
1047    * transcript. If 'false', spoken emojis are not replaced.
1048    * </pre>
1049    *
1050    * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
1051    *
1052    * @return Whether the enableSpokenEmojis field is set.
1053    */
1054   @java.lang.Override
1055   public boolean hasEnableSpokenEmojis() {
1056     return enableSpokenEmojis_ != null;
1057   }
1058   /**
1059    *
1060    *
1061    * <pre>
1062    * The spoken emoji behavior for the call
1063    * If not set, uses default behavior based on model of choice
1064    * If 'true', adds spoken emoji formatting for the request. This will replace
1065    * spoken emojis with the corresponding Unicode symbols in the final
1066    * transcript. If 'false', spoken emojis are not replaced.
1067    * </pre>
1068    *
1069    * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
1070    *
1071    * @return The enableSpokenEmojis.
1072    */
1073   @java.lang.Override
1074   public com.google.protobuf.BoolValue getEnableSpokenEmojis() {
1075     return enableSpokenEmojis_ == null
1076         ? com.google.protobuf.BoolValue.getDefaultInstance()
1077         : enableSpokenEmojis_;
1078   }
1079   /**
1080    *
1081    *
1082    * <pre>
1083    * The spoken emoji behavior for the call
1084    * If not set, uses default behavior based on model of choice
1085    * If 'true', adds spoken emoji formatting for the request. This will replace
1086    * spoken emojis with the corresponding Unicode symbols in the final
1087    * transcript. If 'false', spoken emojis are not replaced.
1088    * </pre>
1089    *
1090    * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
1091    */
1092   @java.lang.Override
1093   public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() {
1094     return enableSpokenEmojis_ == null
1095         ? com.google.protobuf.BoolValue.getDefaultInstance()
1096         : enableSpokenEmojis_;
1097   }
1098 
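  // Illustrative usage sketch (not part of the generated code): because
  // `enable_spoken_punctuation` and `enable_spoken_emojis` are BoolValue wrappers
  // rather than plain bools, a caller wraps the flag (e.g. with BoolValue.of(true))
  // so that "unset" stays distinguishable from an explicit false.
  //
  //   RecognitionConfig spokenConfig =
  //       RecognitionConfig.newBuilder()
  //           .setLanguageCode("en-US")
  //           .setEnableSpokenPunctuation(com.google.protobuf.BoolValue.of(true))
  //           .setEnableSpokenEmojis(com.google.protobuf.BoolValue.of(true))
  //           .build();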
1099   public static final int DIARIZATION_CONFIG_FIELD_NUMBER = 19;
1100   private com.google.cloud.speech.v1.SpeakerDiarizationConfig diarizationConfig_;
1101   /**
1102    *
1103    *
1104    * <pre>
1105    * Config to enable speaker diarization and set additional
1106    * parameters to make diarization better suited for your application.
1107    * Note: When this is enabled, we send all the words from the beginning of the
1108    * audio for the top alternative in every consecutive STREAMING responses.
1109    * This is done in order to improve our speaker tags as our models learn to
1110    * identify the speakers in the conversation over time.
1111    * For non-streaming requests, the diarization results will be provided only
1112    * in the top alternative of the FINAL SpeechRecognitionResult.
1113    * </pre>
1114    *
1115    * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
1116    *
1117    * @return Whether the diarizationConfig field is set.
1118    */
1119   @java.lang.Override
1120   public boolean hasDiarizationConfig() {
1121     return diarizationConfig_ != null;
1122   }
1123   /**
1124    *
1125    *
1126    * <pre>
1127    * Config to enable speaker diarization and set additional
1128    * parameters to make diarization better suited for your application.
1129    * Note: When this is enabled, we send all the words from the beginning of the
1130    * audio for the top alternative in every consecutive STREAMING responses.
1131    * This is done in order to improve our speaker tags as our models learn to
1132    * identify the speakers in the conversation over time.
1133    * For non-streaming requests, the diarization results will be provided only
1134    * in the top alternative of the FINAL SpeechRecognitionResult.
1135    * </pre>
1136    *
1137    * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
1138    *
1139    * @return The diarizationConfig.
1140    */
1141   @java.lang.Override
1142   public com.google.cloud.speech.v1.SpeakerDiarizationConfig getDiarizationConfig() {
1143     return diarizationConfig_ == null
1144         ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()
1145         : diarizationConfig_;
1146   }
1147   /**
1148    *
1149    *
1150    * <pre>
1151    * Config to enable speaker diarization and set additional
1152    * parameters to make diarization better suited for your application.
1153    * Note: When this is enabled, we send all the words from the beginning of the
1154    * audio for the top alternative in every consecutive STREAMING responses.
1155    * This is done in order to improve our speaker tags as our models learn to
1156    * identify the speakers in the conversation over time.
1157    * For non-streaming requests, the diarization results will be provided only
1158    * in the top alternative of the FINAL SpeechRecognitionResult.
1159    * </pre>
1160    *
1161    * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
1162    */
1163   @java.lang.Override
1164   public com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder
1165       getDiarizationConfigOrBuilder() {
1166     return diarizationConfig_ == null
1167         ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()
1168         : diarizationConfig_;
1169   }
1170 
1171   public static final int METADATA_FIELD_NUMBER = 9;
1172   private com.google.cloud.speech.v1.RecognitionMetadata metadata_;
1173   /**
1174    *
1175    *
1176    * <pre>
1177    * Metadata regarding this request.
1178    * </pre>
1179    *
1180    * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
1181    *
1182    * @return Whether the metadata field is set.
1183    */
1184   @java.lang.Override
1185   public boolean hasMetadata() {
1186     return metadata_ != null;
1187   }
1188   /**
1189    *
1190    *
1191    * <pre>
1192    * Metadata regarding this request.
1193    * </pre>
1194    *
1195    * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
1196    *
1197    * @return The metadata.
1198    */
1199   @java.lang.Override
1200   public com.google.cloud.speech.v1.RecognitionMetadata getMetadata() {
1201     return metadata_ == null
1202         ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()
1203         : metadata_;
1204   }
1205   /**
1206    *
1207    *
1208    * <pre>
1209    * Metadata regarding this request.
1210    * </pre>
1211    *
1212    * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
1213    */
1214   @java.lang.Override
1215   public com.google.cloud.speech.v1.RecognitionMetadataOrBuilder getMetadataOrBuilder() {
1216     return metadata_ == null
1217         ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()
1218         : metadata_;
1219   }
1220 
1221   public static final int MODEL_FIELD_NUMBER = 13;
1222 
1223   @SuppressWarnings("serial")
1224   private volatile java.lang.Object model_ = "";
1225   /**
1226    *
1227    *
1228    * <pre>
1229    * Which model to select for the given request. Select the model
1230    * best suited to your domain to get best results. If a model is not
1231    * explicitly specified, then we auto-select a model based on the parameters
1232    * in the RecognitionConfig.
1233    * &lt;table&gt;
1234    *   &lt;tr&gt;
1235    *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
1236    *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
1237    *   &lt;/tr&gt;
1238    *   &lt;tr&gt;
1239    *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
1240    *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
1241    *   &lt;/tr&gt;
1242    *   &lt;tr&gt;
1243    *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
1244    *     &lt;td&gt;Best for short form content like commands or single shot directed
1245    *     speech.&lt;/td&gt;
1246    *   &lt;/tr&gt;
1247    *   &lt;tr&gt;
1248    *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
1249    *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
1250    *   &lt;/tr&gt;
1251    *   &lt;tr&gt;
1252    *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
1253    *     &lt;td&gt;Best for audio that originated from a phone call (typically
1254    *     recorded at an 8khz sampling rate).&lt;/td&gt;
1255    *   &lt;/tr&gt;
1256    *   &lt;tr&gt;
1257    *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
1258    *     &lt;td&gt;Best for audio that originated from video or includes multiple
1259    *         speakers. Ideally the audio is recorded at a 16khz or greater
1260    *         sampling rate. This is a premium model that costs more than the
1261    *         standard rate.&lt;/td&gt;
1262    *   &lt;/tr&gt;
1263    *   &lt;tr&gt;
1264    *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
1265    *     &lt;td&gt;Best for audio that is not one of the specific audio models.
1266    *         For example, long-form audio. Ideally the audio is high-fidelity,
1267    *         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
1268    *   &lt;/tr&gt;
1269    *   &lt;tr&gt;
1270    *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
1271    *     &lt;td&gt;Best for audio that originated from a conversation between a
1272    *         medical provider and patient.&lt;/td&gt;
1273    *   &lt;/tr&gt;
1274    *   &lt;tr&gt;
1275    *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
1276    *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
1277    *         provider.&lt;/td&gt;
1278    *   &lt;/tr&gt;
1279    * &lt;/table&gt;
1280    * </pre>
1281    *
1282    * <code>string model = 13;</code>
1283    *
1284    * @return The model.
1285    */
1286   @java.lang.Override
1287   public java.lang.String getModel() {
1288     java.lang.Object ref = model_;
1289     if (ref instanceof java.lang.String) {
1290       return (java.lang.String) ref;
1291     } else {
1292       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
1293       java.lang.String s = bs.toStringUtf8();
1294       model_ = s;
1295       return s;
1296     }
1297   }
1298   /**
1299    *
1300    *
1301    * <pre>
1302    * Which model to select for the given request. Select the model
1303    * best suited to your domain to get best results. If a model is not
1304    * explicitly specified, then we auto-select a model based on the parameters
1305    * in the RecognitionConfig.
1306    * &lt;table&gt;
1307    *   &lt;tr&gt;
1308    *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
1309    *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
1310    *   &lt;/tr&gt;
1311    *   &lt;tr&gt;
1312    *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
1313    *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
1314    *   &lt;/tr&gt;
1315    *   &lt;tr&gt;
1316    *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
1317    *     &lt;td&gt;Best for short form content like commands or single shot directed
1318    *     speech.&lt;/td&gt;
1319    *   &lt;/tr&gt;
1320    *   &lt;tr&gt;
1321    *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
1322    *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
1323    *   &lt;/tr&gt;
1324    *   &lt;tr&gt;
1325    *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
1326    *     &lt;td&gt;Best for audio that originated from a phone call (typically
1327    *     recorded at an 8khz sampling rate).&lt;/td&gt;
1328    *   &lt;/tr&gt;
1329    *   &lt;tr&gt;
1330    *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
1331    *     &lt;td&gt;Best for audio that originated from video or includes multiple
1332    *         speakers. Ideally the audio is recorded at a 16khz or greater
1333    *         sampling rate. This is a premium model that costs more than the
1334    *         standard rate.&lt;/td&gt;
1335    *   &lt;/tr&gt;
1336    *   &lt;tr&gt;
1337    *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
1338    *     &lt;td&gt;Best for audio that is not one of the specific audio models.
1339    *         For example, long-form audio. Ideally the audio is high-fidelity,
1340    *         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
1341    *   &lt;/tr&gt;
1342    *   &lt;tr&gt;
1343    *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
1344    *     &lt;td&gt;Best for audio that originated from a conversation between a
1345    *         medical provider and patient.&lt;/td&gt;
1346    *   &lt;/tr&gt;
1347    *   &lt;tr&gt;
1348    *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
1349    *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
1350    *         provider.&lt;/td&gt;
1351    *   &lt;/tr&gt;
1352    * &lt;/table&gt;
1353    * </pre>
1354    *
1355    * <code>string model = 13;</code>
1356    *
1357    * @return The bytes for model.
1358    */
1359   @java.lang.Override
1360   public com.google.protobuf.ByteString getModelBytes() {
1361     java.lang.Object ref = model_;
1362     if (ref instanceof java.lang.String) {
1363       com.google.protobuf.ByteString b =
1364           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
1365       model_ = b;
1366       return b;
1367     } else {
1368       return (com.google.protobuf.ByteString) ref;
1369     }
1370   }
1371 
1372   public static final int USE_ENHANCED_FIELD_NUMBER = 14;
1373   private boolean useEnhanced_ = false;
1374   /**
1375    *
1376    *
1377    * <pre>
1378    * Set to true to use an enhanced model for speech recognition.
1379    * If `use_enhanced` is set to true and the `model` field is not set, then
1380    * an appropriate enhanced model is chosen if an enhanced model exists for
1381    * the audio.
1382    * If `use_enhanced` is true and an enhanced version of the specified model
1383    * does not exist, then the speech is recognized using the standard version
1384    * of the specified model.
1385    * </pre>
1386    *
1387    * <code>bool use_enhanced = 14;</code>
1388    *
1389    * @return The useEnhanced.
1390    */
1391   @java.lang.Override
1392   public boolean getUseEnhanced() {
1393     return useEnhanced_;
1394   }
1395 
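  // Illustrative usage sketch (not part of the generated code): selecting a specific
  // model and requesting its enhanced variant, per the `model` and `use_enhanced`
  // field comments above. "phone_call" is one of the model names listed in the table.
  //
  //   RecognitionConfig phoneConfig =
  //       RecognitionConfig.newBuilder()
  //           .setLanguageCode("en-US")
  //           .setModel("phone_call")
  //           .setUseEnhanced(true)
  //           .build();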
1396   private byte memoizedIsInitialized = -1;
1397 
1398   @java.lang.Override
1399   public final boolean isInitialized() {
1400     byte isInitialized = memoizedIsInitialized;
1401     if (isInitialized == 1) return true;
1402     if (isInitialized == 0) return false;
1403 
1404     memoizedIsInitialized = 1;
1405     return true;
1406   }
1407 
1408   @java.lang.Override
1409   public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
1410     if (encoding_
1411         != com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED
1412             .getNumber()) {
1413       output.writeEnum(1, encoding_);
1414     }
1415     if (sampleRateHertz_ != 0) {
1416       output.writeInt32(2, sampleRateHertz_);
1417     }
1418     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
1419       com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_);
1420     }
1421     if (maxAlternatives_ != 0) {
1422       output.writeInt32(4, maxAlternatives_);
1423     }
1424     if (profanityFilter_ != false) {
1425       output.writeBool(5, profanityFilter_);
1426     }
1427     for (int i = 0; i < speechContexts_.size(); i++) {
1428       output.writeMessage(6, speechContexts_.get(i));
1429     }
1430     if (audioChannelCount_ != 0) {
1431       output.writeInt32(7, audioChannelCount_);
1432     }
1433     if (enableWordTimeOffsets_ != false) {
1434       output.writeBool(8, enableWordTimeOffsets_);
1435     }
1436     if (metadata_ != null) {
1437       output.writeMessage(9, getMetadata());
1438     }
1439     if (enableAutomaticPunctuation_ != false) {
1440       output.writeBool(11, enableAutomaticPunctuation_);
1441     }
1442     if (enableSeparateRecognitionPerChannel_ != false) {
1443       output.writeBool(12, enableSeparateRecognitionPerChannel_);
1444     }
1445     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
1446       com.google.protobuf.GeneratedMessageV3.writeString(output, 13, model_);
1447     }
1448     if (useEnhanced_ != false) {
1449       output.writeBool(14, useEnhanced_);
1450     }
1451     if (enableWordConfidence_ != false) {
1452       output.writeBool(15, enableWordConfidence_);
1453     }
1454     for (int i = 0; i < alternativeLanguageCodes_.size(); i++) {
1455       com.google.protobuf.GeneratedMessageV3.writeString(
1456           output, 18, alternativeLanguageCodes_.getRaw(i));
1457     }
1458     if (diarizationConfig_ != null) {
1459       output.writeMessage(19, getDiarizationConfig());
1460     }
1461     if (adaptation_ != null) {
1462       output.writeMessage(20, getAdaptation());
1463     }
1464     if (enableSpokenPunctuation_ != null) {
1465       output.writeMessage(22, getEnableSpokenPunctuation());
1466     }
1467     if (enableSpokenEmojis_ != null) {
1468       output.writeMessage(23, getEnableSpokenEmojis());
1469     }
1470     getUnknownFields().writeTo(output);
1471   }
1472 
1473   @java.lang.Override
1474   public int getSerializedSize() {
1475     int size = memoizedSize;
1476     if (size != -1) return size;
1477 
1478     size = 0;
1479     if (encoding_
1480         != com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED
1481             .getNumber()) {
1482       size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encoding_);
1483     }
1484     if (sampleRateHertz_ != 0) {
1485       size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_);
1486     }
1487     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
1488       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_);
1489     }
1490     if (maxAlternatives_ != 0) {
1491       size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, maxAlternatives_);
1492     }
1493     if (profanityFilter_ != false) {
1494       size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, profanityFilter_);
1495     }
1496     for (int i = 0; i < speechContexts_.size(); i++) {
1497       size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, speechContexts_.get(i));
1498     }
1499     if (audioChannelCount_ != 0) {
1500       size += com.google.protobuf.CodedOutputStream.computeInt32Size(7, audioChannelCount_);
1501     }
1502     if (enableWordTimeOffsets_ != false) {
1503       size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, enableWordTimeOffsets_);
1504     }
1505     if (metadata_ != null) {
1506       size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getMetadata());
1507     }
1508     if (enableAutomaticPunctuation_ != false) {
1509       size +=
1510           com.google.protobuf.CodedOutputStream.computeBoolSize(11, enableAutomaticPunctuation_);
1511     }
1512     if (enableSeparateRecognitionPerChannel_ != false) {
1513       size +=
1514           com.google.protobuf.CodedOutputStream.computeBoolSize(
1515               12, enableSeparateRecognitionPerChannel_);
1516     }
1517     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
1518       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, model_);
1519     }
1520     if (useEnhanced_ != false) {
1521       size += com.google.protobuf.CodedOutputStream.computeBoolSize(14, useEnhanced_);
1522     }
1523     if (enableWordConfidence_ != false) {
1524       size += com.google.protobuf.CodedOutputStream.computeBoolSize(15, enableWordConfidence_);
1525     }
1526     {
1527       int dataSize = 0;
1528       for (int i = 0; i < alternativeLanguageCodes_.size(); i++) {
1529         dataSize += computeStringSizeNoTag(alternativeLanguageCodes_.getRaw(i));
1530       }
1531       size += dataSize;
1532       size += 2 * getAlternativeLanguageCodesList().size();
1533     }
1534     if (diarizationConfig_ != null) {
1535       size += com.google.protobuf.CodedOutputStream.computeMessageSize(19, getDiarizationConfig());
1536     }
1537     if (adaptation_ != null) {
1538       size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getAdaptation());
1539     }
1540     if (enableSpokenPunctuation_ != null) {
1541       size +=
1542           com.google.protobuf.CodedOutputStream.computeMessageSize(
1543               22, getEnableSpokenPunctuation());
1544     }
1545     if (enableSpokenEmojis_ != null) {
1546       size += com.google.protobuf.CodedOutputStream.computeMessageSize(23, getEnableSpokenEmojis());
1547     }
1548     size += getUnknownFields().getSerializedSize();
1549     memoizedSize = size;
1550     return size;
1551   }
1552 
1553   @java.lang.Override
1554   public boolean equals(final java.lang.Object obj) {
1555     if (obj == this) {
1556       return true;
1557     }
1558     if (!(obj instanceof com.google.cloud.speech.v1.RecognitionConfig)) {
1559       return super.equals(obj);
1560     }
1561     com.google.cloud.speech.v1.RecognitionConfig other =
1562         (com.google.cloud.speech.v1.RecognitionConfig) obj;
1563 
1564     if (encoding_ != other.encoding_) return false;
1565     if (getSampleRateHertz() != other.getSampleRateHertz()) return false;
1566     if (getAudioChannelCount() != other.getAudioChannelCount()) return false;
1567     if (getEnableSeparateRecognitionPerChannel() != other.getEnableSeparateRecognitionPerChannel())
1568       return false;
1569     if (!getLanguageCode().equals(other.getLanguageCode())) return false;
1570     if (!getAlternativeLanguageCodesList().equals(other.getAlternativeLanguageCodesList()))
1571       return false;
1572     if (getMaxAlternatives() != other.getMaxAlternatives()) return false;
1573     if (getProfanityFilter() != other.getProfanityFilter()) return false;
1574     if (hasAdaptation() != other.hasAdaptation()) return false;
1575     if (hasAdaptation()) {
1576       if (!getAdaptation().equals(other.getAdaptation())) return false;
1577     }
1578     if (!getSpeechContextsList().equals(other.getSpeechContextsList())) return false;
1579     if (getEnableWordTimeOffsets() != other.getEnableWordTimeOffsets()) return false;
1580     if (getEnableWordConfidence() != other.getEnableWordConfidence()) return false;
1581     if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
1582     if (hasEnableSpokenPunctuation() != other.hasEnableSpokenPunctuation()) return false;
1583     if (hasEnableSpokenPunctuation()) {
1584       if (!getEnableSpokenPunctuation().equals(other.getEnableSpokenPunctuation())) return false;
1585     }
1586     if (hasEnableSpokenEmojis() != other.hasEnableSpokenEmojis()) return false;
1587     if (hasEnableSpokenEmojis()) {
1588       if (!getEnableSpokenEmojis().equals(other.getEnableSpokenEmojis())) return false;
1589     }
1590     if (hasDiarizationConfig() != other.hasDiarizationConfig()) return false;
1591     if (hasDiarizationConfig()) {
1592       if (!getDiarizationConfig().equals(other.getDiarizationConfig())) return false;
1593     }
1594     if (hasMetadata() != other.hasMetadata()) return false;
1595     if (hasMetadata()) {
1596       if (!getMetadata().equals(other.getMetadata())) return false;
1597     }
1598     if (!getModel().equals(other.getModel())) return false;
1599     if (getUseEnhanced() != other.getUseEnhanced()) return false;
1600     if (!getUnknownFields().equals(other.getUnknownFields())) return false;
1601     return true;
1602   }
1603 
1604   @java.lang.Override
1605   public int hashCode() {
1606     if (memoizedHashCode != 0) {
1607       return memoizedHashCode;
1608     }
1609     int hash = 41;
1610     hash = (19 * hash) + getDescriptor().hashCode();
1611     hash = (37 * hash) + ENCODING_FIELD_NUMBER;
1612     hash = (53 * hash) + encoding_;
1613     hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
1614     hash = (53 * hash) + getSampleRateHertz();
1615     hash = (37 * hash) + AUDIO_CHANNEL_COUNT_FIELD_NUMBER;
1616     hash = (53 * hash) + getAudioChannelCount();
1617     hash = (37 * hash) + ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER;
1618     hash =
1619         (53 * hash)
1620             + com.google.protobuf.Internal.hashBoolean(getEnableSeparateRecognitionPerChannel());
1621     hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
1622     hash = (53 * hash) + getLanguageCode().hashCode();
1623     if (getAlternativeLanguageCodesCount() > 0) {
1624       hash = (37 * hash) + ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER;
1625       hash = (53 * hash) + getAlternativeLanguageCodesList().hashCode();
1626     }
1627     hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER;
1628     hash = (53 * hash) + getMaxAlternatives();
1629     hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER;
1630     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getProfanityFilter());
1631     if (hasAdaptation()) {
1632       hash = (37 * hash) + ADAPTATION_FIELD_NUMBER;
1633       hash = (53 * hash) + getAdaptation().hashCode();
1634     }
1635     if (getSpeechContextsCount() > 0) {
1636       hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
1637       hash = (53 * hash) + getSpeechContextsList().hashCode();
1638     }
1639     hash = (37 * hash) + ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER;
1640     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordTimeOffsets());
1641     hash = (37 * hash) + ENABLE_WORD_CONFIDENCE_FIELD_NUMBER;
1642     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordConfidence());
1643     hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
1644     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
1645     if (hasEnableSpokenPunctuation()) {
1646       hash = (37 * hash) + ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER;
1647       hash = (53 * hash) + getEnableSpokenPunctuation().hashCode();
1648     }
1649     if (hasEnableSpokenEmojis()) {
1650       hash = (37 * hash) + ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER;
1651       hash = (53 * hash) + getEnableSpokenEmojis().hashCode();
1652     }
1653     if (hasDiarizationConfig()) {
1654       hash = (37 * hash) + DIARIZATION_CONFIG_FIELD_NUMBER;
1655       hash = (53 * hash) + getDiarizationConfig().hashCode();
1656     }
1657     if (hasMetadata()) {
1658       hash = (37 * hash) + METADATA_FIELD_NUMBER;
1659       hash = (53 * hash) + getMetadata().hashCode();
1660     }
1661     hash = (37 * hash) + MODEL_FIELD_NUMBER;
1662     hash = (53 * hash) + getModel().hashCode();
1663     hash = (37 * hash) + USE_ENHANCED_FIELD_NUMBER;
1664     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getUseEnhanced());
1665     hash = (29 * hash) + getUnknownFields().hashCode();
1666     memoizedHashCode = hash;
1667     return hash;
1668   }
1669 
1670   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(java.nio.ByteBuffer data)
1671       throws com.google.protobuf.InvalidProtocolBufferException {
1672     return PARSER.parseFrom(data);
1673   }
1674 
1675   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
1676       java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1677       throws com.google.protobuf.InvalidProtocolBufferException {
1678     return PARSER.parseFrom(data, extensionRegistry);
1679   }
1680 
1681   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
1682       com.google.protobuf.ByteString data)
1683       throws com.google.protobuf.InvalidProtocolBufferException {
1684     return PARSER.parseFrom(data);
1685   }
1686 
1687   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
1688       com.google.protobuf.ByteString data,
1689       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1690       throws com.google.protobuf.InvalidProtocolBufferException {
1691     return PARSER.parseFrom(data, extensionRegistry);
1692   }
1693 
1694   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(byte[] data)
1695       throws com.google.protobuf.InvalidProtocolBufferException {
1696     return PARSER.parseFrom(data);
1697   }
1698 
1699   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
1700       byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1701       throws com.google.protobuf.InvalidProtocolBufferException {
1702     return PARSER.parseFrom(data, extensionRegistry);
1703   }
1704 
1705   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(java.io.InputStream input)
1706       throws java.io.IOException {
1707     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
1708   }
1709 
1710   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
1711       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1712       throws java.io.IOException {
1713     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
1714         PARSER, input, extensionRegistry);
1715   }
1716 
1717   public static com.google.cloud.speech.v1.RecognitionConfig parseDelimitedFrom(
1718       java.io.InputStream input) throws java.io.IOException {
1719     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
1720   }
1721 
1722   public static com.google.cloud.speech.v1.RecognitionConfig parseDelimitedFrom(
1723       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1724       throws java.io.IOException {
1725     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
1726         PARSER, input, extensionRegistry);
1727   }
1728 
1729   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
1730       com.google.protobuf.CodedInputStream input) throws java.io.IOException {
1731     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
1732   }
1733 
1734   public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
1735       com.google.protobuf.CodedInputStream input,
1736       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1737       throws java.io.IOException {
1738     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
1739         PARSER, input, extensionRegistry);
1740   }
1741 
1742   @java.lang.Override
1743   public Builder newBuilderForType() {
1744     return newBuilder();
1745   }
1746 
1747   public static Builder newBuilder() {
1748     return DEFAULT_INSTANCE.toBuilder();
1749   }
1750 
1751   public static Builder newBuilder(com.google.cloud.speech.v1.RecognitionConfig prototype) {
1752     return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
1753   }
1754 
1755   @java.lang.Override
1756   public Builder toBuilder() {
1757     return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
1758   }
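  // Illustrative sketch only: an existing immutable message can be copied and
  // adjusted through `toBuilder()` (or `newBuilder(prototype)`) without
  // mutating the original; `existingConfig` is a hypothetical instance.
  //
  //   RecognitionConfig updated =
  //       existingConfig.toBuilder().setMaxAlternatives(3).build();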
1759 
1760   @java.lang.Override
1761   protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
1762     Builder builder = new Builder(parent);
1763     return builder;
1764   }
1765   /**
1766    *
1767    *
1768    * <pre>
1769    * Provides information to the recognizer that specifies how to process the
1770    * request.
1771    * </pre>
1772    *
1773    * Protobuf type {@code google.cloud.speech.v1.RecognitionConfig}
1774    */
1775   public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
1776       implements
1777       // @@protoc_insertion_point(builder_implements:google.cloud.speech.v1.RecognitionConfig)
1778       com.google.cloud.speech.v1.RecognitionConfigOrBuilder {
1779     public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
1780       return com.google.cloud.speech.v1.SpeechProto
1781           .internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor;
1782     }
1783 
1784     @java.lang.Override
1785     protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
1786         internalGetFieldAccessorTable() {
1787       return com.google.cloud.speech.v1.SpeechProto
1788           .internal_static_google_cloud_speech_v1_RecognitionConfig_fieldAccessorTable
1789           .ensureFieldAccessorsInitialized(
1790               com.google.cloud.speech.v1.RecognitionConfig.class,
1791               com.google.cloud.speech.v1.RecognitionConfig.Builder.class);
1792     }
1793 
1794     // Construct using com.google.cloud.speech.v1.RecognitionConfig.newBuilder()
1795     private Builder() {}
1796 
1797     private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
1798       super(parent);
1799     }
1800 
1801     @java.lang.Override
1802     public Builder clear() {
1803       super.clear();
1804       bitField0_ = 0;
1805       encoding_ = 0;
1806       sampleRateHertz_ = 0;
1807       audioChannelCount_ = 0;
1808       enableSeparateRecognitionPerChannel_ = false;
1809       languageCode_ = "";
1810       alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
1811       bitField0_ = (bitField0_ & ~0x00000020);
1812       maxAlternatives_ = 0;
1813       profanityFilter_ = false;
1814       adaptation_ = null;
1815       if (adaptationBuilder_ != null) {
1816         adaptationBuilder_.dispose();
1817         adaptationBuilder_ = null;
1818       }
1819       if (speechContextsBuilder_ == null) {
1820         speechContexts_ = java.util.Collections.emptyList();
1821       } else {
1822         speechContexts_ = null;
1823         speechContextsBuilder_.clear();
1824       }
1825       bitField0_ = (bitField0_ & ~0x00000200);
1826       enableWordTimeOffsets_ = false;
1827       enableWordConfidence_ = false;
1828       enableAutomaticPunctuation_ = false;
1829       enableSpokenPunctuation_ = null;
1830       if (enableSpokenPunctuationBuilder_ != null) {
1831         enableSpokenPunctuationBuilder_.dispose();
1832         enableSpokenPunctuationBuilder_ = null;
1833       }
1834       enableSpokenEmojis_ = null;
1835       if (enableSpokenEmojisBuilder_ != null) {
1836         enableSpokenEmojisBuilder_.dispose();
1837         enableSpokenEmojisBuilder_ = null;
1838       }
1839       diarizationConfig_ = null;
1840       if (diarizationConfigBuilder_ != null) {
1841         diarizationConfigBuilder_.dispose();
1842         diarizationConfigBuilder_ = null;
1843       }
1844       metadata_ = null;
1845       if (metadataBuilder_ != null) {
1846         metadataBuilder_.dispose();
1847         metadataBuilder_ = null;
1848       }
1849       model_ = "";
1850       useEnhanced_ = false;
1851       return this;
1852     }
1853 
1854     @java.lang.Override
1855     public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
1856       return com.google.cloud.speech.v1.SpeechProto
1857           .internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor;
1858     }
1859 
1860     @java.lang.Override
1861     public com.google.cloud.speech.v1.RecognitionConfig getDefaultInstanceForType() {
1862       return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance();
1863     }
1864 
1865     @java.lang.Override
1866     public com.google.cloud.speech.v1.RecognitionConfig build() {
1867       com.google.cloud.speech.v1.RecognitionConfig result = buildPartial();
1868       if (!result.isInitialized()) {
1869         throw newUninitializedMessageException(result);
1870       }
1871       return result;
1872     }
1873 
1874     @java.lang.Override
1875     public com.google.cloud.speech.v1.RecognitionConfig buildPartial() {
1876       com.google.cloud.speech.v1.RecognitionConfig result =
1877           new com.google.cloud.speech.v1.RecognitionConfig(this);
1878       buildPartialRepeatedFields(result);
1879       if (bitField0_ != 0) {
1880         buildPartial0(result);
1881       }
1882       onBuilt();
1883       return result;
1884     }
1885 
1886     private void buildPartialRepeatedFields(com.google.cloud.speech.v1.RecognitionConfig result) {
1887       if (((bitField0_ & 0x00000020) != 0)) {
1888         alternativeLanguageCodes_ = alternativeLanguageCodes_.getUnmodifiableView();
1889         bitField0_ = (bitField0_ & ~0x00000020);
1890       }
1891       result.alternativeLanguageCodes_ = alternativeLanguageCodes_;
1892       if (speechContextsBuilder_ == null) {
1893         if (((bitField0_ & 0x00000200) != 0)) {
1894           speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
1895           bitField0_ = (bitField0_ & ~0x00000200);
1896         }
1897         result.speechContexts_ = speechContexts_;
1898       } else {
1899         result.speechContexts_ = speechContextsBuilder_.build();
1900       }
1901     }
1902 
1903     private void buildPartial0(com.google.cloud.speech.v1.RecognitionConfig result) {
1904       int from_bitField0_ = bitField0_;
1905       if (((from_bitField0_ & 0x00000001) != 0)) {
1906         result.encoding_ = encoding_;
1907       }
1908       if (((from_bitField0_ & 0x00000002) != 0)) {
1909         result.sampleRateHertz_ = sampleRateHertz_;
1910       }
1911       if (((from_bitField0_ & 0x00000004) != 0)) {
1912         result.audioChannelCount_ = audioChannelCount_;
1913       }
1914       if (((from_bitField0_ & 0x00000008) != 0)) {
1915         result.enableSeparateRecognitionPerChannel_ = enableSeparateRecognitionPerChannel_;
1916       }
1917       if (((from_bitField0_ & 0x00000010) != 0)) {
1918         result.languageCode_ = languageCode_;
1919       }
1920       if (((from_bitField0_ & 0x00000040) != 0)) {
1921         result.maxAlternatives_ = maxAlternatives_;
1922       }
1923       if (((from_bitField0_ & 0x00000080) != 0)) {
1924         result.profanityFilter_ = profanityFilter_;
1925       }
1926       if (((from_bitField0_ & 0x00000100) != 0)) {
1927         result.adaptation_ = adaptationBuilder_ == null ? adaptation_ : adaptationBuilder_.build();
1928       }
1929       if (((from_bitField0_ & 0x00000400) != 0)) {
1930         result.enableWordTimeOffsets_ = enableWordTimeOffsets_;
1931       }
1932       if (((from_bitField0_ & 0x00000800) != 0)) {
1933         result.enableWordConfidence_ = enableWordConfidence_;
1934       }
1935       if (((from_bitField0_ & 0x00001000) != 0)) {
1936         result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
1937       }
1938       if (((from_bitField0_ & 0x00002000) != 0)) {
1939         result.enableSpokenPunctuation_ =
1940             enableSpokenPunctuationBuilder_ == null
1941                 ? enableSpokenPunctuation_
1942                 : enableSpokenPunctuationBuilder_.build();
1943       }
1944       if (((from_bitField0_ & 0x00004000) != 0)) {
1945         result.enableSpokenEmojis_ =
1946             enableSpokenEmojisBuilder_ == null
1947                 ? enableSpokenEmojis_
1948                 : enableSpokenEmojisBuilder_.build();
1949       }
1950       if (((from_bitField0_ & 0x00008000) != 0)) {
1951         result.diarizationConfig_ =
1952             diarizationConfigBuilder_ == null
1953                 ? diarizationConfig_
1954                 : diarizationConfigBuilder_.build();
1955       }
1956       if (((from_bitField0_ & 0x00010000) != 0)) {
1957         result.metadata_ = metadataBuilder_ == null ? metadata_ : metadataBuilder_.build();
1958       }
1959       if (((from_bitField0_ & 0x00020000) != 0)) {
1960         result.model_ = model_;
1961       }
1962       if (((from_bitField0_ & 0x00040000) != 0)) {
1963         result.useEnhanced_ = useEnhanced_;
1964       }
1965     }
1966 
1967     @java.lang.Override
1968     public Builder clone() {
1969       return super.clone();
1970     }
1971 
1972     @java.lang.Override
1973     public Builder setField(
1974         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1975       return super.setField(field, value);
1976     }
1977 
1978     @java.lang.Override
1979     public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
1980       return super.clearField(field);
1981     }
1982 
1983     @java.lang.Override
1984     public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
1985       return super.clearOneof(oneof);
1986     }
1987 
1988     @java.lang.Override
1989     public Builder setRepeatedField(
1990         com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
1991       return super.setRepeatedField(field, index, value);
1992     }
1993 
1994     @java.lang.Override
1995     public Builder addRepeatedField(
1996         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1997       return super.addRepeatedField(field, value);
1998     }
1999 
2000     @java.lang.Override
2001     public Builder mergeFrom(com.google.protobuf.Message other) {
2002       if (other instanceof com.google.cloud.speech.v1.RecognitionConfig) {
2003         return mergeFrom((com.google.cloud.speech.v1.RecognitionConfig) other);
2004       } else {
2005         super.mergeFrom(other);
2006         return this;
2007       }
2008     }
2009 
2010     public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionConfig other) {
2011       if (other == com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance()) return this;
2012       if (other.encoding_ != 0) {
2013         setEncodingValue(other.getEncodingValue());
2014       }
2015       if (other.getSampleRateHertz() != 0) {
2016         setSampleRateHertz(other.getSampleRateHertz());
2017       }
2018       if (other.getAudioChannelCount() != 0) {
2019         setAudioChannelCount(other.getAudioChannelCount());
2020       }
2021       if (other.getEnableSeparateRecognitionPerChannel() != false) {
2022         setEnableSeparateRecognitionPerChannel(other.getEnableSeparateRecognitionPerChannel());
2023       }
2024       if (!other.getLanguageCode().isEmpty()) {
2025         languageCode_ = other.languageCode_;
2026         bitField0_ |= 0x00000010;
2027         onChanged();
2028       }
2029       if (!other.alternativeLanguageCodes_.isEmpty()) {
2030         if (alternativeLanguageCodes_.isEmpty()) {
2031           alternativeLanguageCodes_ = other.alternativeLanguageCodes_;
2032           bitField0_ = (bitField0_ & ~0x00000020);
2033         } else {
2034           ensureAlternativeLanguageCodesIsMutable();
2035           alternativeLanguageCodes_.addAll(other.alternativeLanguageCodes_);
2036         }
2037         onChanged();
2038       }
2039       if (other.getMaxAlternatives() != 0) {
2040         setMaxAlternatives(other.getMaxAlternatives());
2041       }
2042       if (other.getProfanityFilter() != false) {
2043         setProfanityFilter(other.getProfanityFilter());
2044       }
2045       if (other.hasAdaptation()) {
2046         mergeAdaptation(other.getAdaptation());
2047       }
2048       if (speechContextsBuilder_ == null) {
2049         if (!other.speechContexts_.isEmpty()) {
2050           if (speechContexts_.isEmpty()) {
2051             speechContexts_ = other.speechContexts_;
2052             bitField0_ = (bitField0_ & ~0x00000200);
2053           } else {
2054             ensureSpeechContextsIsMutable();
2055             speechContexts_.addAll(other.speechContexts_);
2056           }
2057           onChanged();
2058         }
2059       } else {
2060         if (!other.speechContexts_.isEmpty()) {
2061           if (speechContextsBuilder_.isEmpty()) {
2062             speechContextsBuilder_.dispose();
2063             speechContextsBuilder_ = null;
2064             speechContexts_ = other.speechContexts_;
2065             bitField0_ = (bitField0_ & ~0x00000200);
2066             speechContextsBuilder_ =
2067                 com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
2068                     ? getSpeechContextsFieldBuilder()
2069                     : null;
2070           } else {
2071             speechContextsBuilder_.addAllMessages(other.speechContexts_);
2072           }
2073         }
2074       }
2075       if (other.getEnableWordTimeOffsets() != false) {
2076         setEnableWordTimeOffsets(other.getEnableWordTimeOffsets());
2077       }
2078       if (other.getEnableWordConfidence() != false) {
2079         setEnableWordConfidence(other.getEnableWordConfidence());
2080       }
2081       if (other.getEnableAutomaticPunctuation() != false) {
2082         setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation());
2083       }
2084       if (other.hasEnableSpokenPunctuation()) {
2085         mergeEnableSpokenPunctuation(other.getEnableSpokenPunctuation());
2086       }
2087       if (other.hasEnableSpokenEmojis()) {
2088         mergeEnableSpokenEmojis(other.getEnableSpokenEmojis());
2089       }
2090       if (other.hasDiarizationConfig()) {
2091         mergeDiarizationConfig(other.getDiarizationConfig());
2092       }
2093       if (other.hasMetadata()) {
2094         mergeMetadata(other.getMetadata());
2095       }
2096       if (!other.getModel().isEmpty()) {
2097         model_ = other.model_;
2098         bitField0_ |= 0x00020000;
2099         onChanged();
2100       }
2101       if (other.getUseEnhanced() != false) {
2102         setUseEnhanced(other.getUseEnhanced());
2103       }
2104       this.mergeUnknownFields(other.getUnknownFields());
2105       onChanged();
2106       return this;
2107     }
2108 
2109     @java.lang.Override
2110     public final boolean isInitialized() {
2111       return true;
2112     }
2113 
2114     @java.lang.Override
2115     public Builder mergeFrom(
2116         com.google.protobuf.CodedInputStream input,
2117         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2118         throws java.io.IOException {
2119       if (extensionRegistry == null) {
2120         throw new java.lang.NullPointerException();
2121       }
2122       try {
2123         boolean done = false;
2124         while (!done) {
2125           int tag = input.readTag();
2126           switch (tag) {
2127             case 0:
2128               done = true;
2129               break;
2130             case 8:
2131               {
2132                 encoding_ = input.readEnum();
2133                 bitField0_ |= 0x00000001;
2134                 break;
2135               } // case 8
2136             case 16:
2137               {
2138                 sampleRateHertz_ = input.readInt32();
2139                 bitField0_ |= 0x00000002;
2140                 break;
2141               } // case 16
2142             case 26:
2143               {
2144                 languageCode_ = input.readStringRequireUtf8();
2145                 bitField0_ |= 0x00000010;
2146                 break;
2147               } // case 26
2148             case 32:
2149               {
2150                 maxAlternatives_ = input.readInt32();
2151                 bitField0_ |= 0x00000040;
2152                 break;
2153               } // case 32
2154             case 40:
2155               {
2156                 profanityFilter_ = input.readBool();
2157                 bitField0_ |= 0x00000080;
2158                 break;
2159               } // case 40
2160             case 50:
2161               {
2162                 com.google.cloud.speech.v1.SpeechContext m =
2163                     input.readMessage(
2164                         com.google.cloud.speech.v1.SpeechContext.parser(), extensionRegistry);
2165                 if (speechContextsBuilder_ == null) {
2166                   ensureSpeechContextsIsMutable();
2167                   speechContexts_.add(m);
2168                 } else {
2169                   speechContextsBuilder_.addMessage(m);
2170                 }
2171                 break;
2172               } // case 50
2173             case 56:
2174               {
2175                 audioChannelCount_ = input.readInt32();
2176                 bitField0_ |= 0x00000004;
2177                 break;
2178               } // case 56
2179             case 64:
2180               {
2181                 enableWordTimeOffsets_ = input.readBool();
2182                 bitField0_ |= 0x00000400;
2183                 break;
2184               } // case 64
2185             case 74:
2186               {
2187                 input.readMessage(getMetadataFieldBuilder().getBuilder(), extensionRegistry);
2188                 bitField0_ |= 0x00010000;
2189                 break;
2190               } // case 74
2191             case 88:
2192               {
2193                 enableAutomaticPunctuation_ = input.readBool();
2194                 bitField0_ |= 0x00001000;
2195                 break;
2196               } // case 88
2197             case 96:
2198               {
2199                 enableSeparateRecognitionPerChannel_ = input.readBool();
2200                 bitField0_ |= 0x00000008;
2201                 break;
2202               } // case 96
2203             case 106:
2204               {
2205                 model_ = input.readStringRequireUtf8();
2206                 bitField0_ |= 0x00020000;
2207                 break;
2208               } // case 106
2209             case 112:
2210               {
2211                 useEnhanced_ = input.readBool();
2212                 bitField0_ |= 0x00040000;
2213                 break;
2214               } // case 112
2215             case 120:
2216               {
2217                 enableWordConfidence_ = input.readBool();
2218                 bitField0_ |= 0x00000800;
2219                 break;
2220               } // case 120
2221             case 146:
2222               {
2223                 java.lang.String s = input.readStringRequireUtf8();
2224                 ensureAlternativeLanguageCodesIsMutable();
2225                 alternativeLanguageCodes_.add(s);
2226                 break;
2227               } // case 146
2228             case 154:
2229               {
2230                 input.readMessage(
2231                     getDiarizationConfigFieldBuilder().getBuilder(), extensionRegistry);
2232                 bitField0_ |= 0x00008000;
2233                 break;
2234               } // case 154
2235             case 162:
2236               {
2237                 input.readMessage(getAdaptationFieldBuilder().getBuilder(), extensionRegistry);
2238                 bitField0_ |= 0x00000100;
2239                 break;
2240               } // case 162
2241             case 178:
2242               {
2243                 input.readMessage(
2244                     getEnableSpokenPunctuationFieldBuilder().getBuilder(), extensionRegistry);
2245                 bitField0_ |= 0x00002000;
2246                 break;
2247               } // case 178
2248             case 186:
2249               {
2250                 input.readMessage(
2251                     getEnableSpokenEmojisFieldBuilder().getBuilder(), extensionRegistry);
2252                 bitField0_ |= 0x00004000;
2253                 break;
2254               } // case 186
2255             default:
2256               {
2257                 if (!super.parseUnknownField(input, extensionRegistry, tag)) {
2258                   done = true; // was an endgroup tag
2259                 }
2260                 break;
2261               } // default:
2262           } // switch (tag)
2263         } // while (!done)
2264       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2265         throw e.unwrapIOException();
2266       } finally {
2267         onChanged();
2268       } // finally
2269       return this;
2270     }
2271 
2272     private int bitField0_;
2273 
2274     private int encoding_ = 0;
2275     /**
2276      *
2277      *
2278      * <pre>
2279      * Encoding of audio data sent in all `RecognitionAudio` messages.
2280      * This field is optional for `FLAC` and `WAV` audio files and required
2281      * for all other audio formats. For details, see
2282      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2283      * </pre>
2284      *
2285      * <code>.google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;</code>
2286      *
2287      * @return The enum numeric value on the wire for encoding.
2288      */
2289     @java.lang.Override
2290     public int getEncodingValue() {
2291       return encoding_;
2292     }
2293     /**
2294      *
2295      *
2296      * <pre>
2297      * Encoding of audio data sent in all `RecognitionAudio` messages.
2298      * This field is optional for `FLAC` and `WAV` audio files and required
2299      * for all other audio formats. For details, see
2300      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2301      * </pre>
2302      *
2303      * <code>.google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;</code>
2304      *
2305      * @param value The enum numeric value on the wire for encoding to set.
2306      * @return This builder for chaining.
2307      */
2308     public Builder setEncodingValue(int value) {
2309       encoding_ = value;
2310       bitField0_ |= 0x00000001;
2311       onChanged();
2312       return this;
2313     }
2314     /**
2315      *
2316      *
2317      * <pre>
2318      * Encoding of audio data sent in all `RecognitionAudio` messages.
2319      * This field is optional for `FLAC` and `WAV` audio files and required
2320      * for all other audio formats. For details, see
2321      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2322      * </pre>
2323      *
2324      * <code>.google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;</code>
2325      *
2326      * @return The encoding.
2327      */
2328     @java.lang.Override
2329     public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() {
2330       com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding result =
2331           com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.forNumber(encoding_);
2332       return result == null
2333           ? com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
2334           : result;
2335     }
2336     /**
2337      *
2338      *
2339      * <pre>
2340      * Encoding of audio data sent in all `RecognitionAudio` messages.
2341      * This field is optional for `FLAC` and `WAV` audio files and required
2342      * for all other audio formats. For details, see
2343      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2344      * </pre>
2345      *
2346      * <code>.google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;</code>
2347      *
2348      * @param value The encoding to set.
2349      * @return This builder for chaining.
2350      */
2351     public Builder setEncoding(com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding value) {
2352       if (value == null) {
2353         throw new NullPointerException();
2354       }
2355       bitField0_ |= 0x00000001;
2356       encoding_ = value.getNumber();
2357       onChanged();
2358       return this;
2359     }
2360     /**
2361      *
2362      *
2363      * <pre>
2364      * Encoding of audio data sent in all `RecognitionAudio` messages.
2365      * This field is optional for `FLAC` and `WAV` audio files and required
2366      * for all other audio formats. For details, see
2367      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2368      * </pre>
2369      *
2370      * <code>.google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;</code>
2371      *
2372      * @return This builder for chaining.
2373      */
2374     public Builder clearEncoding() {
2375       bitField0_ = (bitField0_ & ~0x00000001);
2376       encoding_ = 0;
2377       onChanged();
2378       return this;
2379     }
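    // Illustrative sketch only: headerless formats such as LINEAR16 require an
    // explicit encoding, while it can be inferred from the header for FLAC/WAV.
    // `builder` is a hypothetical RecognitionConfig.Builder instance.
    //
    //   builder.setEncoding(RecognitionConfig.AudioEncoding.LINEAR16);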
2380 
2381     private int sampleRateHertz_;
2382     /**
2383      *
2384      *
2385      * <pre>
2386      * Sample rate in Hertz of the audio data sent in all
2387      * `RecognitionAudio` messages. Valid values are: 8000-48000.
2388      * 16000 is optimal. For best results, set the sampling rate of the audio
2389      * source to 16000 Hz. If that's not possible, use the native sample rate of
2390      * the audio source (instead of re-sampling).
2391      * This field is optional for FLAC and WAV audio files, but is
2392      * required for all other audio formats. For details, see
2393      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2394      * </pre>
2395      *
2396      * <code>int32 sample_rate_hertz = 2;</code>
2397      *
2398      * @return The sampleRateHertz.
2399      */
2400     @java.lang.Override
2401     public int getSampleRateHertz() {
2402       return sampleRateHertz_;
2403     }
2404     /**
2405      *
2406      *
2407      * <pre>
2408      * Sample rate in Hertz of the audio data sent in all
2409      * `RecognitionAudio` messages. Valid values are: 8000-48000.
2410      * 16000 is optimal. For best results, set the sampling rate of the audio
2411      * source to 16000 Hz. If that's not possible, use the native sample rate of
2412      * the audio source (instead of re-sampling).
2413      * This field is optional for FLAC and WAV audio files, but is
2414      * required for all other audio formats. For details, see
2415      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2416      * </pre>
2417      *
2418      * <code>int32 sample_rate_hertz = 2;</code>
2419      *
2420      * @param value The sampleRateHertz to set.
2421      * @return This builder for chaining.
2422      */
2423     public Builder setSampleRateHertz(int value) {
2424 
2425       sampleRateHertz_ = value;
2426       bitField0_ |= 0x00000002;
2427       onChanged();
2428       return this;
2429     }
2430     /**
2431      *
2432      *
2433      * <pre>
2434      * Sample rate in Hertz of the audio data sent in all
2435      * `RecognitionAudio` messages. Valid values are: 8000-48000.
2436      * 16000 is optimal. For best results, set the sampling rate of the audio
2437      * source to 16000 Hz. If that's not possible, use the native sample rate of
2438      * the audio source (instead of re-sampling).
2439      * This field is optional for FLAC and WAV audio files, but is
2440      * required for all other audio formats. For details, see
2441      * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
2442      * </pre>
2443      *
2444      * <code>int32 sample_rate_hertz = 2;</code>
2445      *
2446      * @return This builder for chaining.
2447      */
2448     public Builder clearSampleRateHertz() {
2449       bitField0_ = (bitField0_ & ~0x00000002);
2450       sampleRateHertz_ = 0;
2451       onChanged();
2452       return this;
2453     }
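    // Illustrative sketch only: 16000 Hz is the recommended rate; otherwise use
    // the source's native rate instead of re-sampling (hypothetical `builder`).
    //
    //   builder.setSampleRateHertz(16000);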
2454 
2455     private int audioChannelCount_;
2456     /**
2457      *
2458      *
2459      * <pre>
2460      * The number of channels in the input audio data.
2461      * ONLY set this for MULTI-CHANNEL recognition.
2462      * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
2463      * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
2464      * If `0` or omitted, defaults to one channel (mono).
2465      * Note: We only recognize the first channel by default.
2466      * To perform independent recognition on each channel set
2467      * `enable_separate_recognition_per_channel` to 'true'.
2468      * </pre>
2469      *
2470      * <code>int32 audio_channel_count = 7;</code>
2471      *
2472      * @return The audioChannelCount.
2473      */
2474     @java.lang.Override
2475     public int getAudioChannelCount() {
2476       return audioChannelCount_;
2477     }
2478     /**
2479      *
2480      *
2481      * <pre>
2482      * The number of channels in the input audio data.
2483      * ONLY set this for MULTI-CHANNEL recognition.
2484      * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
2485      * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
2486      * If `0` or omitted, defaults to one channel (mono).
2487      * Note: We only recognize the first channel by default.
2488      * To perform independent recognition on each channel set
2489      * `enable_separate_recognition_per_channel` to 'true'.
2490      * </pre>
2491      *
2492      * <code>int32 audio_channel_count = 7;</code>
2493      *
2494      * @param value The audioChannelCount to set.
2495      * @return This builder for chaining.
2496      */
2497     public Builder setAudioChannelCount(int value) {
2498 
2499       audioChannelCount_ = value;
2500       bitField0_ |= 0x00000004;
2501       onChanged();
2502       return this;
2503     }
2504     /**
2505      *
2506      *
2507      * <pre>
2508      * The number of channels in the input audio data.
2509      * ONLY set this for MULTI-CHANNEL recognition.
2510      * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
2511      * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
2512      * If `0` or omitted, defaults to one channel (mono).
2513      * Note: We only recognize the first channel by default.
2514      * To perform independent recognition on each channel set
2515      * `enable_separate_recognition_per_channel` to 'true'.
2516      * </pre>
2517      *
2518      * <code>int32 audio_channel_count = 7;</code>
2519      *
2520      * @return This builder for chaining.
2521      */
2522     public Builder clearAudioChannelCount() {
2523       bitField0_ = (bitField0_ & ~0x00000004);
2524       audioChannelCount_ = 0;
2525       onChanged();
2526       return this;
2527     }
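    // Illustrative sketch only: set the channel count only for multi-channel
    // audio, and pair it with `enable_separate_recognition_per_channel` to get
    // one result per channel (hypothetical `builder`).
    //
    //   builder.setAudioChannelCount(2)
    //          .setEnableSeparateRecognitionPerChannel(true);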
2528 
2529     private boolean enableSeparateRecognitionPerChannel_;
2530     /**
2531      *
2532      *
2533      * <pre>
2534      * This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
2535      * to get each channel recognized separately. The recognition result will
2536      * contain a `channel_tag` field to state which channel that result belongs
2537      * to. If this is not true, we will only recognize the first channel. The
2538      * request is billed cumulatively for all channels recognized:
2539      * `audio_channel_count` multiplied by the length of the audio.
2540      * </pre>
2541      *
2542      * <code>bool enable_separate_recognition_per_channel = 12;</code>
2543      *
2544      * @return The enableSeparateRecognitionPerChannel.
2545      */
2546     @java.lang.Override
2547     public boolean getEnableSeparateRecognitionPerChannel() {
2548       return enableSeparateRecognitionPerChannel_;
2549     }
2550     /**
2551      *
2552      *
2553      * <pre>
2554      * This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
2555      * to get each channel recognized separately. The recognition result will
2556      * contain a `channel_tag` field to state which channel that result belongs
2557      * to. If this is not true, we will only recognize the first channel. The
2558      * request is billed cumulatively for all channels recognized:
2559      * `audio_channel_count` multiplied by the length of the audio.
2560      * </pre>
2561      *
2562      * <code>bool enable_separate_recognition_per_channel = 12;</code>
2563      *
2564      * @param value The enableSeparateRecognitionPerChannel to set.
2565      * @return This builder for chaining.
2566      */
2567     public Builder setEnableSeparateRecognitionPerChannel(boolean value) {
2568 
2569       enableSeparateRecognitionPerChannel_ = value;
2570       bitField0_ |= 0x00000008;
2571       onChanged();
2572       return this;
2573     }
2574     /**
2575      *
2576      *
2577      * <pre>
2578      * This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
2579      * to get each channel recognized separately. The recognition result will
2580      * contain a `channel_tag` field to state which channel that result belongs
2581      * to. If this is not true, we will only recognize the first channel. The
2582      * request is billed cumulatively for all channels recognized:
2583      * `audio_channel_count` multiplied by the length of the audio.
2584      * </pre>
2585      *
2586      * <code>bool enable_separate_recognition_per_channel = 12;</code>
2587      *
2588      * @return This builder for chaining.
2589      */
2590     public Builder clearEnableSeparateRecognitionPerChannel() {
2591       bitField0_ = (bitField0_ & ~0x00000008);
2592       enableSeparateRecognitionPerChannel_ = false;
2593       onChanged();
2594       return this;
2595     }
2596 
2597     private java.lang.Object languageCode_ = "";
2598     /**
2599      *
2600      *
2601      * <pre>
2602      * Required. The language of the supplied audio as a
2603      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
2604      * Example: "en-US".
2605      * See [Language
2606      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2607      * of the currently supported language codes.
2608      * </pre>
2609      *
2610      * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
2611      *
2612      * @return The languageCode.
2613      */
2614     public java.lang.String getLanguageCode() {
2615       java.lang.Object ref = languageCode_;
2616       if (!(ref instanceof java.lang.String)) {
2617         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
2618         java.lang.String s = bs.toStringUtf8();
2619         languageCode_ = s;
2620         return s;
2621       } else {
2622         return (java.lang.String) ref;
2623       }
2624     }
2625     /**
2626      *
2627      *
2628      * <pre>
2629      * Required. The language of the supplied audio as a
2630      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
2631      * Example: "en-US".
2632      * See [Language
2633      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2634      * of the currently supported language codes.
2635      * </pre>
2636      *
2637      * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
2638      *
2639      * @return The bytes for languageCode.
2640      */
2641     public com.google.protobuf.ByteString getLanguageCodeBytes() {
2642       java.lang.Object ref = languageCode_;
2643       if (ref instanceof String) {
2644         com.google.protobuf.ByteString b =
2645             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
2646         languageCode_ = b;
2647         return b;
2648       } else {
2649         return (com.google.protobuf.ByteString) ref;
2650       }
2651     }
2652     /**
2653      *
2654      *
2655      * <pre>
2656      * Required. The language of the supplied audio as a
2657      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
2658      * Example: "en-US".
2659      * See [Language
2660      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2661      * of the currently supported language codes.
2662      * </pre>
2663      *
2664      * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
2665      *
2666      * @param value The languageCode to set.
2667      * @return This builder for chaining.
2668      */
2669     public Builder setLanguageCode(java.lang.String value) {
2670       if (value == null) {
2671         throw new NullPointerException();
2672       }
2673       languageCode_ = value;
2674       bitField0_ |= 0x00000010;
2675       onChanged();
2676       return this;
2677     }
2678     /**
2679      *
2680      *
2681      * <pre>
2682      * Required. The language of the supplied audio as a
2683      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
2684      * Example: "en-US".
2685      * See [Language
2686      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2687      * of the currently supported language codes.
2688      * </pre>
2689      *
2690      * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
2691      *
2692      * @return This builder for chaining.
2693      */
2694     public Builder clearLanguageCode() {
2695       languageCode_ = getDefaultInstance().getLanguageCode();
2696       bitField0_ = (bitField0_ & ~0x00000010);
2697       onChanged();
2698       return this;
2699     }
2700     /**
2701      *
2702      *
2703      * <pre>
2704      * Required. The language of the supplied audio as a
2705      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
2706      * Example: "en-US".
2707      * See [Language
2708      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2709      * of the currently supported language codes.
2710      * </pre>
2711      *
2712      * <code>string language_code = 3 [(.google.api.field_behavior) = REQUIRED];</code>
2713      *
2714      * @param value The bytes for languageCode to set.
2715      * @return This builder for chaining.
2716      */
2717     public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
2718       if (value == null) {
2719         throw new NullPointerException();
2720       }
2721       checkByteStringIsUtf8(value);
2722       languageCode_ = value;
2723       bitField0_ |= 0x00000010;
2724       onChanged();
2725       return this;
2726     }
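    // Illustrative sketch only: `language_code` is required and takes a BCP-47
    // tag such as "en-US" (hypothetical `builder`).
    //
    //   builder.setLanguageCode("en-US");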
2727 
2728     private com.google.protobuf.LazyStringList alternativeLanguageCodes_ =
2729         com.google.protobuf.LazyStringArrayList.EMPTY;
2730 
2731     private void ensureAlternativeLanguageCodesIsMutable() {
2732       if (!((bitField0_ & 0x00000020) != 0)) {
2733         alternativeLanguageCodes_ =
2734             new com.google.protobuf.LazyStringArrayList(alternativeLanguageCodes_);
2735         bitField0_ |= 0x00000020;
2736       }
2737     }
2738     /**
2739      *
2740      *
2741      * <pre>
2742      * A list of up to 3 additional
2743      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2744      * listing possible alternative languages of the supplied audio.
2745      * See [Language
2746      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2747      * of the currently supported language codes. If alternative languages are
2748      * listed, recognition result will contain recognition in the most likely
2749      * language detected including the main language_code. The recognition result
2750      * will include the language tag of the language detected in the audio. Note:
2751      * This feature is only supported for Voice Command and Voice Search use cases
2752      * and performance may vary for other use cases (e.g., phone call
2753      * transcription).
2754      * </pre>
2755      *
2756      * <code>repeated string alternative_language_codes = 18;</code>
2757      *
2758      * @return A list containing the alternativeLanguageCodes.
2759      */
2760     public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() {
2761       return alternativeLanguageCodes_.getUnmodifiableView();
2762     }
2763     /**
2764      *
2765      *
2766      * <pre>
2767      * A list of up to 3 additional
2768      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2769      * listing possible alternative languages of the supplied audio.
2770      * See [Language
2771      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2772      * of the currently supported language codes. If alternative languages are
2773      * listed, recognition result will contain recognition in the most likely
2774      * language detected including the main language_code. The recognition result
2775      * will include the language tag of the language detected in the audio. Note:
2776      * This feature is only supported for Voice Command and Voice Search use cases
2777      * and performance may vary for other use cases (e.g., phone call
2778      * transcription).
2779      * </pre>
2780      *
2781      * <code>repeated string alternative_language_codes = 18;</code>
2782      *
2783      * @return The count of alternativeLanguageCodes.
2784      */
2785     public int getAlternativeLanguageCodesCount() {
2786       return alternativeLanguageCodes_.size();
2787     }
2788     /**
2789      *
2790      *
2791      * <pre>
2792      * A list of up to 3 additional
2793      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2794      * listing possible alternative languages of the supplied audio.
2795      * See [Language
2796      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2797      * of the currently supported language codes. If alternative languages are
2798      * listed, recognition result will contain recognition in the most likely
2799      * language detected including the main language_code. The recognition result
2800      * will include the language tag of the language detected in the audio. Note:
2801      * This feature is only supported for Voice Command and Voice Search use cases
2802      * and performance may vary for other use cases (e.g., phone call
2803      * transcription).
2804      * </pre>
2805      *
2806      * <code>repeated string alternative_language_codes = 18;</code>
2807      *
2808      * @param index The index of the element to return.
2809      * @return The alternativeLanguageCodes at the given index.
2810      */
2811     public java.lang.String getAlternativeLanguageCodes(int index) {
2812       return alternativeLanguageCodes_.get(index);
2813     }
2814     /**
2815      *
2816      *
2817      * <pre>
2818      * A list of up to 3 additional
2819      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2820      * listing possible alternative languages of the supplied audio.
2821      * See [Language
2822      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2823      * of the currently supported language codes. If alternative languages are
2824      * listed, recognition result will contain recognition in the most likely
2825      * language detected including the main language_code. The recognition result
2826      * will include the language tag of the language detected in the audio. Note:
2827      * This feature is only supported for Voice Command and Voice Search use cases
2828      * and performance may vary for other use cases (e.g., phone call
2829      * transcription).
2830      * </pre>
2831      *
2832      * <code>repeated string alternative_language_codes = 18;</code>
2833      *
2834      * @param index The index of the value to return.
2835      * @return The bytes of the alternativeLanguageCodes at the given index.
2836      */
2837     public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) {
2838       return alternativeLanguageCodes_.getByteString(index);
2839     }
2840     /**
2841      *
2842      *
2843      * <pre>
2844      * A list of up to 3 additional
2845      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2846      * listing possible alternative languages of the supplied audio.
2847      * See [Language
2848      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2849      * of the currently supported language codes. If alternative languages are
2850      * listed, recognition result will contain recognition in the most likely
2851      * language detected including the main language_code. The recognition result
2852      * will include the language tag of the language detected in the audio. Note:
2853      * This feature is only supported for Voice Command and Voice Search use cases
2854      * and performance may vary for other use cases (e.g., phone call
2855      * transcription).
2856      * </pre>
2857      *
2858      * <code>repeated string alternative_language_codes = 18;</code>
2859      *
2860      * @param index The index to set the value at.
2861      * @param value The alternativeLanguageCodes to set.
2862      * @return This builder for chaining.
2863      */
2864     public Builder setAlternativeLanguageCodes(int index, java.lang.String value) {
2865       if (value == null) {
2866         throw new NullPointerException();
2867       }
2868       ensureAlternativeLanguageCodesIsMutable();
2869       alternativeLanguageCodes_.set(index, value);
2870       onChanged();
2871       return this;
2872     }
2873     /**
2874      *
2875      *
2876      * <pre>
2877      * A list of up to 3 additional
2878      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2879      * listing possible alternative languages of the supplied audio.
2880      * See [Language
2881      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2882      * of the currently supported language codes. If alternative languages are
2883      * listed, recognition result will contain recognition in the most likely
2884      * language detected including the main language_code. The recognition result
2885      * will include the language tag of the language detected in the audio. Note:
2886      * This feature is only supported for Voice Command and Voice Search use cases
2887      * and performance may vary for other use cases (e.g., phone call
2888      * transcription).
2889      * </pre>
2890      *
2891      * <code>repeated string alternative_language_codes = 18;</code>
2892      *
2893      * @param value The alternativeLanguageCodes to add.
2894      * @return This builder for chaining.
2895      */
2896     public Builder addAlternativeLanguageCodes(java.lang.String value) {
2897       if (value == null) {
2898         throw new NullPointerException();
2899       }
2900       ensureAlternativeLanguageCodesIsMutable();
2901       alternativeLanguageCodes_.add(value);
2902       onChanged();
2903       return this;
2904     }
2905     /**
2906      *
2907      *
2908      * <pre>
2909      * A list of up to 3 additional
2910      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2911      * listing possible alternative languages of the supplied audio.
2912      * See [Language
2913      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2914      * of the currently supported language codes. If alternative languages are
2915      * listed, recognition result will contain recognition in the most likely
2916      * language detected including the main language_code. The recognition result
2917      * will include the language tag of the language detected in the audio. Note:
2918      * This feature is only supported for Voice Command and Voice Search use cases
2919      * and performance may vary for other use cases (e.g., phone call
2920      * transcription).
2921      * </pre>
2922      *
2923      * <code>repeated string alternative_language_codes = 18;</code>
2924      *
2925      * @param values The alternativeLanguageCodes to add.
2926      * @return This builder for chaining.
2927      */
2928     public Builder addAllAlternativeLanguageCodes(java.lang.Iterable<java.lang.String> values) {
2929       ensureAlternativeLanguageCodesIsMutable();
2930       com.google.protobuf.AbstractMessageLite.Builder.addAll(values, alternativeLanguageCodes_);
2931       onChanged();
2932       return this;
2933     }
2934     /**
2935      *
2936      *
2937      * <pre>
2938      * A list of up to 3 additional
2939      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2940      * listing possible alternative languages of the supplied audio.
2941      * See [Language
2942      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2943      * of the currently supported language codes. If alternative languages are
2944      * listed, recognition result will contain recognition in the most likely
2945      * language detected including the main language_code. The recognition result
2946      * will include the language tag of the language detected in the audio. Note:
2947      * This feature is only supported for Voice Command and Voice Search use cases
2948      * and performance may vary for other use cases (e.g., phone call
2949      * transcription).
2950      * </pre>
2951      *
2952      * <code>repeated string alternative_language_codes = 18;</code>
2953      *
2954      * @return This builder for chaining.
2955      */
2956     public Builder clearAlternativeLanguageCodes() {
2957       alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
2958       bitField0_ = (bitField0_ & ~0x00000020);
2959       onChanged();
2960       return this;
2961     }
2962     /**
2963      *
2964      *
2965      * <pre>
2966      * A list of up to 3 additional
2967      * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
2968      * listing possible alternative languages of the supplied audio.
2969      * See [Language
2970      * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
2971      * of the currently supported language codes. If alternative languages are
2972      * listed, recognition result will contain recognition in the most likely
2973      * language detected including the main language_code. The recognition result
2974      * will include the language tag of the language detected in the audio. Note:
2975      * This feature is only supported for Voice Command and Voice Search use cases
2976      * and performance may vary for other use cases (e.g., phone call
2977      * transcription).
2978      * </pre>
2979      *
2980      * <code>repeated string alternative_language_codes = 18;</code>
2981      *
2982      * @param value The bytes of the alternativeLanguageCodes to add.
2983      * @return This builder for chaining.
2984      */
2985     public Builder addAlternativeLanguageCodesBytes(com.google.protobuf.ByteString value) {
2986       if (value == null) {
2987         throw new NullPointerException();
2988       }
2989       checkByteStringIsUtf8(value);
2990       ensureAlternativeLanguageCodesIsMutable();
2991       alternativeLanguageCodes_.add(value);
2992       onChanged();
2993       return this;
2994     }
2995 
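    // Usage sketch (illustrative; not part of the generated source),
    // continuing the configBuilder example above: up to three alternative
    // BCP-47 tags may accompany the primary language_code, added singly or
    // in bulk.
    //
    //   configBuilder.addAlternativeLanguageCodes("es-US");
    //   configBuilder.addAllAlternativeLanguageCodes(
    //       java.util.Arrays.asList("fr-FR", "de-DE"));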
2996     private int maxAlternatives_;
2997     /**
2998      *
2999      *
3000      * <pre>
3001      * Maximum number of recognition hypotheses to be returned.
3002      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
3003      * within each `SpeechRecognitionResult`.
3004      * The server may return fewer than `max_alternatives`.
3005      * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
3006      * one. If omitted, will return a maximum of one.
3007      * </pre>
3008      *
3009      * <code>int32 max_alternatives = 4;</code>
3010      *
3011      * @return The maxAlternatives.
3012      */
3013     @java.lang.Override
3014     public int getMaxAlternatives() {
3015       return maxAlternatives_;
3016     }
3017     /**
3018      *
3019      *
3020      * <pre>
3021      * Maximum number of recognition hypotheses to be returned.
3022      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
3023      * within each `SpeechRecognitionResult`.
3024      * The server may return fewer than `max_alternatives`.
3025      * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
3026      * one. If omitted, will return a maximum of one.
3027      * </pre>
3028      *
3029      * <code>int32 max_alternatives = 4;</code>
3030      *
3031      * @param value The maxAlternatives to set.
3032      * @return This builder for chaining.
3033      */
3034     public Builder setMaxAlternatives(int value) {
3035 
3036       maxAlternatives_ = value;
3037       bitField0_ |= 0x00000040;
3038       onChanged();
3039       return this;
3040     }
3041     /**
3042      *
3043      *
3044      * <pre>
3045      * Maximum number of recognition hypotheses to be returned.
3046      * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
3047      * within each `SpeechRecognitionResult`.
3048      * The server may return fewer than `max_alternatives`.
3049      * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
3050      * one. If omitted, will return a maximum of one.
3051      * </pre>
3052      *
3053      * <code>int32 max_alternatives = 4;</code>
3054      *
3055      * @return This builder for chaining.
3056      */
3057     public Builder clearMaxAlternatives() {
3058       bitField0_ = (bitField0_ & ~0x00000040);
3059       maxAlternatives_ = 0;
3060       onChanged();
3061       return this;
3062     }
3063 
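    // Usage sketch (illustrative; not part of the generated source),
    // continuing the configBuilder example above: request several hypotheses
    // per result; valid values are 0-30, and 0 or 1 both yield at most one.
    //
    //   configBuilder.setMaxAlternatives(3);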
3064     private boolean profanityFilter_;
3065     /**
3066      *
3067      *
3068      * <pre>
3069      * If set to `true`, the server will attempt to filter out
3070      * profanities, replacing all but the initial character in each filtered word
3071      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
3072      * won't be filtered out.
3073      * </pre>
3074      *
3075      * <code>bool profanity_filter = 5;</code>
3076      *
3077      * @return The profanityFilter.
3078      */
3079     @java.lang.Override
3080     public boolean getProfanityFilter() {
3081       return profanityFilter_;
3082     }
3083     /**
3084      *
3085      *
3086      * <pre>
3087      * If set to `true`, the server will attempt to filter out
3088      * profanities, replacing all but the initial character in each filtered word
3089      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
3090      * won't be filtered out.
3091      * </pre>
3092      *
3093      * <code>bool profanity_filter = 5;</code>
3094      *
3095      * @param value The profanityFilter to set.
3096      * @return This builder for chaining.
3097      */
3098     public Builder setProfanityFilter(boolean value) {
3099 
3100       profanityFilter_ = value;
3101       bitField0_ |= 0x00000080;
3102       onChanged();
3103       return this;
3104     }
3105     /**
3106      *
3107      *
3108      * <pre>
3109      * If set to `true`, the server will attempt to filter out
3110      * profanities, replacing all but the initial character in each filtered word
3111      * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
3112      * won't be filtered out.
3113      * </pre>
3114      *
3115      * <code>bool profanity_filter = 5;</code>
3116      *
3117      * @return This builder for chaining.
3118      */
3119     public Builder clearProfanityFilter() {
3120       bitField0_ = (bitField0_ & ~0x00000080);
3121       profanityFilter_ = false;
3122       onChanged();
3123       return this;
3124     }
3125 
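    // Usage sketch (illustrative; not part of the generated source),
    // continuing the configBuilder example above: profanity masking is
    // opt-in and defaults to off.
    //
    //   configBuilder.setProfanityFilter(true);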
3126     private com.google.cloud.speech.v1.SpeechAdaptation adaptation_;
3127     private com.google.protobuf.SingleFieldBuilderV3<
3128             com.google.cloud.speech.v1.SpeechAdaptation,
3129             com.google.cloud.speech.v1.SpeechAdaptation.Builder,
3130             com.google.cloud.speech.v1.SpeechAdaptationOrBuilder>
3131         adaptationBuilder_;
3132     /**
3133      *
3134      *
3135      * <pre>
3136      * Speech adaptation configuration improves the accuracy of speech
3137      * recognition. For more information, see the [speech
3138      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3139      * documentation.
3140      * When speech adaptation is set it supersedes the `speech_contexts` field.
3141      * </pre>
3142      *
3143      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3144      *
3145      * @return Whether the adaptation field is set.
3146      */
3147     public boolean hasAdaptation() {
3148       return ((bitField0_ & 0x00000100) != 0);
3149     }
3150     /**
3151      *
3152      *
3153      * <pre>
3154      * Speech adaptation configuration improves the accuracy of speech
3155      * recognition. For more information, see the [speech
3156      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3157      * documentation.
3158      * When speech adaptation is set it supersedes the `speech_contexts` field.
3159      * </pre>
3160      *
3161      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3162      *
3163      * @return The adaptation.
3164      */
3165     public com.google.cloud.speech.v1.SpeechAdaptation getAdaptation() {
3166       if (adaptationBuilder_ == null) {
3167         return adaptation_ == null
3168             ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()
3169             : adaptation_;
3170       } else {
3171         return adaptationBuilder_.getMessage();
3172       }
3173     }
3174     /**
3175      *
3176      *
3177      * <pre>
3178      * Speech adaptation configuration improves the accuracy of speech
3179      * recognition. For more information, see the [speech
3180      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3181      * documentation.
3182      * When speech adaptation is set it supersedes the `speech_contexts` field.
3183      * </pre>
3184      *
3185      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3186      */
3187     public Builder setAdaptation(com.google.cloud.speech.v1.SpeechAdaptation value) {
3188       if (adaptationBuilder_ == null) {
3189         if (value == null) {
3190           throw new NullPointerException();
3191         }
3192         adaptation_ = value;
3193       } else {
3194         adaptationBuilder_.setMessage(value);
3195       }
3196       bitField0_ |= 0x00000100;
3197       onChanged();
3198       return this;
3199     }
3200     /**
3201      *
3202      *
3203      * <pre>
3204      * Speech adaptation configuration improves the accuracy of speech
3205      * recognition. For more information, see the [speech
3206      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3207      * documentation.
3208      * When speech adaptation is set it supersedes the `speech_contexts` field.
3209      * </pre>
3210      *
3211      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3212      */
3213     public Builder setAdaptation(
3214         com.google.cloud.speech.v1.SpeechAdaptation.Builder builderForValue) {
3215       if (adaptationBuilder_ == null) {
3216         adaptation_ = builderForValue.build();
3217       } else {
3218         adaptationBuilder_.setMessage(builderForValue.build());
3219       }
3220       bitField0_ |= 0x00000100;
3221       onChanged();
3222       return this;
3223     }
3224     /**
3225      *
3226      *
3227      * <pre>
3228      * Speech adaptation configuration improves the accuracy of speech
3229      * recognition. For more information, see the [speech
3230      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3231      * documentation.
3232      * When speech adaptation is set it supersedes the `speech_contexts` field.
3233      * </pre>
3234      *
3235      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3236      */
3237     public Builder mergeAdaptation(com.google.cloud.speech.v1.SpeechAdaptation value) {
3238       if (adaptationBuilder_ == null) {
3239         if (((bitField0_ & 0x00000100) != 0)
3240             && adaptation_ != null
3241             && adaptation_ != com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()) {
3242           getAdaptationBuilder().mergeFrom(value);
3243         } else {
3244           adaptation_ = value;
3245         }
3246       } else {
3247         adaptationBuilder_.mergeFrom(value);
3248       }
3249       bitField0_ |= 0x00000100;
3250       onChanged();
3251       return this;
3252     }
3253     /**
3254      *
3255      *
3256      * <pre>
3257      * Speech adaptation configuration improves the accuracy of speech
3258      * recognition. For more information, see the [speech
3259      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3260      * documentation.
3261      * When speech adaptation is set it supersedes the `speech_contexts` field.
3262      * </pre>
3263      *
3264      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3265      */
3266     public Builder clearAdaptation() {
3267       bitField0_ = (bitField0_ & ~0x00000100);
3268       adaptation_ = null;
3269       if (adaptationBuilder_ != null) {
3270         adaptationBuilder_.dispose();
3271         adaptationBuilder_ = null;
3272       }
3273       onChanged();
3274       return this;
3275     }
3276     /**
3277      *
3278      *
3279      * <pre>
3280      * Speech adaptation configuration improves the accuracy of speech
3281      * recognition. For more information, see the [speech
3282      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3283      * documentation.
3284      * When speech adaptation is set it supersedes the `speech_contexts` field.
3285      * </pre>
3286      *
3287      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3288      */
3289     public com.google.cloud.speech.v1.SpeechAdaptation.Builder getAdaptationBuilder() {
3290       bitField0_ |= 0x00000100;
3291       onChanged();
3292       return getAdaptationFieldBuilder().getBuilder();
3293     }
3294     /**
3295      *
3296      *
3297      * <pre>
3298      * Speech adaptation configuration improves the accuracy of speech
3299      * recognition. For more information, see the [speech
3300      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3301      * documentation.
3302      * When speech adaptation is set it supersedes the `speech_contexts` field.
3303      * </pre>
3304      *
3305      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3306      */
3307     public com.google.cloud.speech.v1.SpeechAdaptationOrBuilder getAdaptationOrBuilder() {
3308       if (adaptationBuilder_ != null) {
3309         return adaptationBuilder_.getMessageOrBuilder();
3310       } else {
3311         return adaptation_ == null
3312             ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()
3313             : adaptation_;
3314       }
3315     }
3316     /**
3317      *
3318      *
3319      * <pre>
3320      * Speech adaptation configuration improves the accuracy of speech
3321      * recognition. For more information, see the [speech
3322      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
3323      * documentation.
3324      * When speech adaptation is set it supersedes the `speech_contexts` field.
3325      * </pre>
3326      *
3327      * <code>.google.cloud.speech.v1.SpeechAdaptation adaptation = 20;</code>
3328      */
3329     private com.google.protobuf.SingleFieldBuilderV3<
3330             com.google.cloud.speech.v1.SpeechAdaptation,
3331             com.google.cloud.speech.v1.SpeechAdaptation.Builder,
3332             com.google.cloud.speech.v1.SpeechAdaptationOrBuilder>
3333         getAdaptationFieldBuilder() {
3334       if (adaptationBuilder_ == null) {
3335         adaptationBuilder_ =
3336             new com.google.protobuf.SingleFieldBuilderV3<
3337                 com.google.cloud.speech.v1.SpeechAdaptation,
3338                 com.google.cloud.speech.v1.SpeechAdaptation.Builder,
3339                 com.google.cloud.speech.v1.SpeechAdaptationOrBuilder>(
3340                 getAdaptation(), getParentForChildren(), isClean());
3341         adaptation_ = null;
3342       }
3343       return adaptationBuilder_;
3344     }
3345 
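    // Usage sketch (illustrative; not part of the generated source),
    // continuing the configBuilder example above. SpeechAdaptation.newBuilder()
    // is assumed from the usual generated-protobuf pattern and is defined in
    // the SpeechAdaptation class, not in this file. Note that a set adaptation
    // supersedes the speech_contexts field.
    //
    //   configBuilder.setAdaptation(
    //       com.google.cloud.speech.v1.SpeechAdaptation.newBuilder());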
3346     private java.util.List<com.google.cloud.speech.v1.SpeechContext> speechContexts_ =
3347         java.util.Collections.emptyList();
3348 
3349     private void ensureSpeechContextsIsMutable() {
3350       if (!((bitField0_ & 0x00000200) != 0)) {
3351         speechContexts_ =
3352             new java.util.ArrayList<com.google.cloud.speech.v1.SpeechContext>(speechContexts_);
3353         bitField0_ |= 0x00000200;
3354       }
3355     }
3356 
3357     private com.google.protobuf.RepeatedFieldBuilderV3<
3358             com.google.cloud.speech.v1.SpeechContext,
3359             com.google.cloud.speech.v1.SpeechContext.Builder,
3360             com.google.cloud.speech.v1.SpeechContextOrBuilder>
3361         speechContextsBuilder_;
3362 
3363     /**
3364      *
3365      *
3366      * <pre>
3367      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3368      * A means to provide context to assist the speech recognition. For more
3369      * information, see
3370      * [speech
3371      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3372      * </pre>
3373      *
3374      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3375      */
3376     public java.util.List<com.google.cloud.speech.v1.SpeechContext> getSpeechContextsList() {
3377       if (speechContextsBuilder_ == null) {
3378         return java.util.Collections.unmodifiableList(speechContexts_);
3379       } else {
3380         return speechContextsBuilder_.getMessageList();
3381       }
3382     }
3383     /**
3384      *
3385      *
3386      * <pre>
3387      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3388      * A means to provide context to assist the speech recognition. For more
3389      * information, see
3390      * [speech
3391      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3392      * </pre>
3393      *
3394      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3395      */
3396     public int getSpeechContextsCount() {
3397       if (speechContextsBuilder_ == null) {
3398         return speechContexts_.size();
3399       } else {
3400         return speechContextsBuilder_.getCount();
3401       }
3402     }
3403     /**
3404      *
3405      *
3406      * <pre>
3407      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3408      * A means to provide context to assist the speech recognition. For more
3409      * information, see
3410      * [speech
3411      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3412      * </pre>
3413      *
3414      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3415      */
3416     public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) {
3417       if (speechContextsBuilder_ == null) {
3418         return speechContexts_.get(index);
3419       } else {
3420         return speechContextsBuilder_.getMessage(index);
3421       }
3422     }
3423     /**
3424      *
3425      *
3426      * <pre>
3427      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3428      * A means to provide context to assist the speech recognition. For more
3429      * information, see
3430      * [speech
3431      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3432      * </pre>
3433      *
3434      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3435      */
3436     public Builder setSpeechContexts(int index, com.google.cloud.speech.v1.SpeechContext value) {
3437       if (speechContextsBuilder_ == null) {
3438         if (value == null) {
3439           throw new NullPointerException();
3440         }
3441         ensureSpeechContextsIsMutable();
3442         speechContexts_.set(index, value);
3443         onChanged();
3444       } else {
3445         speechContextsBuilder_.setMessage(index, value);
3446       }
3447       return this;
3448     }
3449     /**
3450      *
3451      *
3452      * <pre>
3453      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3454      * A means to provide context to assist the speech recognition. For more
3455      * information, see
3456      * [speech
3457      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3458      * </pre>
3459      *
3460      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3461      */
3462     public Builder setSpeechContexts(
3463         int index, com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) {
3464       if (speechContextsBuilder_ == null) {
3465         ensureSpeechContextsIsMutable();
3466         speechContexts_.set(index, builderForValue.build());
3467         onChanged();
3468       } else {
3469         speechContextsBuilder_.setMessage(index, builderForValue.build());
3470       }
3471       return this;
3472     }
3473     /**
3474      *
3475      *
3476      * <pre>
3477      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3478      * A means to provide context to assist the speech recognition. For more
3479      * information, see
3480      * [speech
3481      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3482      * </pre>
3483      *
3484      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3485      */
3486     public Builder addSpeechContexts(com.google.cloud.speech.v1.SpeechContext value) {
3487       if (speechContextsBuilder_ == null) {
3488         if (value == null) {
3489           throw new NullPointerException();
3490         }
3491         ensureSpeechContextsIsMutable();
3492         speechContexts_.add(value);
3493         onChanged();
3494       } else {
3495         speechContextsBuilder_.addMessage(value);
3496       }
3497       return this;
3498     }
3499     /**
3500      *
3501      *
3502      * <pre>
3503      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3504      * A means to provide context to assist the speech recognition. For more
3505      * information, see
3506      * [speech
3507      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3508      * </pre>
3509      *
3510      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3511      */
3512     public Builder addSpeechContexts(int index, com.google.cloud.speech.v1.SpeechContext value) {
3513       if (speechContextsBuilder_ == null) {
3514         if (value == null) {
3515           throw new NullPointerException();
3516         }
3517         ensureSpeechContextsIsMutable();
3518         speechContexts_.add(index, value);
3519         onChanged();
3520       } else {
3521         speechContextsBuilder_.addMessage(index, value);
3522       }
3523       return this;
3524     }
3525     /**
3526      *
3527      *
3528      * <pre>
3529      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3530      * A means to provide context to assist the speech recognition. For more
3531      * information, see
3532      * [speech
3533      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3534      * </pre>
3535      *
3536      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3537      */
3538     public Builder addSpeechContexts(
3539         com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) {
3540       if (speechContextsBuilder_ == null) {
3541         ensureSpeechContextsIsMutable();
3542         speechContexts_.add(builderForValue.build());
3543         onChanged();
3544       } else {
3545         speechContextsBuilder_.addMessage(builderForValue.build());
3546       }
3547       return this;
3548     }
3549     /**
3550      *
3551      *
3552      * <pre>
3553      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3554      * A means to provide context to assist the speech recognition. For more
3555      * information, see
3556      * [speech
3557      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3558      * </pre>
3559      *
3560      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3561      */
3562     public Builder addSpeechContexts(
3563         int index, com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) {
3564       if (speechContextsBuilder_ == null) {
3565         ensureSpeechContextsIsMutable();
3566         speechContexts_.add(index, builderForValue.build());
3567         onChanged();
3568       } else {
3569         speechContextsBuilder_.addMessage(index, builderForValue.build());
3570       }
3571       return this;
3572     }
3573     /**
3574      *
3575      *
3576      * <pre>
3577      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3578      * A means to provide context to assist the speech recognition. For more
3579      * information, see
3580      * [speech
3581      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3582      * </pre>
3583      *
3584      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3585      */
3586     public Builder addAllSpeechContexts(
3587         java.lang.Iterable<? extends com.google.cloud.speech.v1.SpeechContext> values) {
3588       if (speechContextsBuilder_ == null) {
3589         ensureSpeechContextsIsMutable();
3590         com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_);
3591         onChanged();
3592       } else {
3593         speechContextsBuilder_.addAllMessages(values);
3594       }
3595       return this;
3596     }
3597     /**
3598      *
3599      *
3600      * <pre>
3601      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3602      * A means to provide context to assist the speech recognition. For more
3603      * information, see
3604      * [speech
3605      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3606      * </pre>
3607      *
3608      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3609      */
3610     public Builder clearSpeechContexts() {
3611       if (speechContextsBuilder_ == null) {
3612         speechContexts_ = java.util.Collections.emptyList();
3613         bitField0_ = (bitField0_ & ~0x00000200);
3614         onChanged();
3615       } else {
3616         speechContextsBuilder_.clear();
3617       }
3618       return this;
3619     }
3620     /**
3621      *
3622      *
3623      * <pre>
3624      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3625      * A means to provide context to assist the speech recognition. For more
3626      * information, see
3627      * [speech
3628      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3629      * </pre>
3630      *
3631      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3632      */
3633     public Builder removeSpeechContexts(int index) {
3634       if (speechContextsBuilder_ == null) {
3635         ensureSpeechContextsIsMutable();
3636         speechContexts_.remove(index);
3637         onChanged();
3638       } else {
3639         speechContextsBuilder_.remove(index);
3640       }
3641       return this;
3642     }
3643     /**
3644      *
3645      *
3646      * <pre>
3647      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3648      * A means to provide context to assist the speech recognition. For more
3649      * information, see
3650      * [speech
3651      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3652      * </pre>
3653      *
3654      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3655      */
3656     public com.google.cloud.speech.v1.SpeechContext.Builder getSpeechContextsBuilder(int index) {
3657       return getSpeechContextsFieldBuilder().getBuilder(index);
3658     }
3659     /**
3660      *
3661      *
3662      * <pre>
3663      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3664      * A means to provide context to assist the speech recognition. For more
3665      * information, see
3666      * [speech
3667      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3668      * </pre>
3669      *
3670      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3671      */
3672     public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuilder(int index) {
3673       if (speechContextsBuilder_ == null) {
3674         return speechContexts_.get(index);
3675       } else {
3676         return speechContextsBuilder_.getMessageOrBuilder(index);
3677       }
3678     }
3679     /**
3680      *
3681      *
3682      * <pre>
3683      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3684      * A means to provide context to assist the speech recognition. For more
3685      * information, see
3686      * [speech
3687      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3688      * </pre>
3689      *
3690      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3691      */
3692     public java.util.List<? extends com.google.cloud.speech.v1.SpeechContextOrBuilder>
3693         getSpeechContextsOrBuilderList() {
3694       if (speechContextsBuilder_ != null) {
3695         return speechContextsBuilder_.getMessageOrBuilderList();
3696       } else {
3697         return java.util.Collections.unmodifiableList(speechContexts_);
3698       }
3699     }
3700     /**
3701      *
3702      *
3703      * <pre>
3704      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3705      * A means to provide context to assist the speech recognition. For more
3706      * information, see
3707      * [speech
3708      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3709      * </pre>
3710      *
3711      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3712      */
3713     public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder() {
3714       return getSpeechContextsFieldBuilder()
3715           .addBuilder(com.google.cloud.speech.v1.SpeechContext.getDefaultInstance());
3716     }
3717     /**
3718      *
3719      *
3720      * <pre>
3721      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3722      * A means to provide context to assist the speech recognition. For more
3723      * information, see
3724      * [speech
3725      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3726      * </pre>
3727      *
3728      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3729      */
3730     public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder(int index) {
3731       return getSpeechContextsFieldBuilder()
3732           .addBuilder(index, com.google.cloud.speech.v1.SpeechContext.getDefaultInstance());
3733     }
3734     /**
3735      *
3736      *
3737      * <pre>
3738      * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
3739      * A means to provide context to assist the speech recognition. For more
3740      * information, see
3741      * [speech
3742      * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
3743      * </pre>
3744      *
3745      * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
3746      */
3747     public java.util.List<com.google.cloud.speech.v1.SpeechContext.Builder>
3748         getSpeechContextsBuilderList() {
3749       return getSpeechContextsFieldBuilder().getBuilderList();
3750     }
3751 
3752     private com.google.protobuf.RepeatedFieldBuilderV3<
3753             com.google.cloud.speech.v1.SpeechContext,
3754             com.google.cloud.speech.v1.SpeechContext.Builder,
3755             com.google.cloud.speech.v1.SpeechContextOrBuilder>
3756         getSpeechContextsFieldBuilder() {
3757       if (speechContextsBuilder_ == null) {
3758         speechContextsBuilder_ =
3759             new com.google.protobuf.RepeatedFieldBuilderV3<
3760                 com.google.cloud.speech.v1.SpeechContext,
3761                 com.google.cloud.speech.v1.SpeechContext.Builder,
3762                 com.google.cloud.speech.v1.SpeechContextOrBuilder>(
3763                 speechContexts_,
3764                 ((bitField0_ & 0x00000200) != 0),
3765                 getParentForChildren(),
3766                 isClean());
3767         speechContexts_ = null;
3768       }
3769       return speechContextsBuilder_;
3770     }
3771 
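    // Usage sketch (illustrative; not part of the generated source),
    // continuing the configBuilder example above. SpeechContext.newBuilder()
    // and addPhrases(String) are assumed from the generated SpeechContext
    // class; only addSpeechContexts itself is defined here.
    //
    //   configBuilder.addSpeechContexts(
    //       com.google.cloud.speech.v1.SpeechContext.newBuilder()
    //           .addPhrases("weather forecast"));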
3772     private boolean enableWordTimeOffsets_;
3773     /**
3774      *
3775      *
3776      * <pre>
3777      * If `true`, the top result includes a list of words and
3778      * the start and end time offsets (timestamps) for those words. If
3779      * `false`, no word-level time offset information is returned. The default is
3780      * `false`.
3781      * </pre>
3782      *
3783      * <code>bool enable_word_time_offsets = 8;</code>
3784      *
3785      * @return The enableWordTimeOffsets.
3786      */
3787     @java.lang.Override
3788     public boolean getEnableWordTimeOffsets() {
3789       return enableWordTimeOffsets_;
3790     }
3791     /**
3792      *
3793      *
3794      * <pre>
3795      * If `true`, the top result includes a list of words and
3796      * the start and end time offsets (timestamps) for those words. If
3797      * `false`, no word-level time offset information is returned. The default is
3798      * `false`.
3799      * </pre>
3800      *
3801      * <code>bool enable_word_time_offsets = 8;</code>
3802      *
3803      * @param value The enableWordTimeOffsets to set.
3804      * @return This builder for chaining.
3805      */
3806     public Builder setEnableWordTimeOffsets(boolean value) {
3807 
3808       enableWordTimeOffsets_ = value;
3809       bitField0_ |= 0x00000400;
3810       onChanged();
3811       return this;
3812     }
3813     /**
3814      *
3815      *
3816      * <pre>
3817      * If `true`, the top result includes a list of words and
3818      * the start and end time offsets (timestamps) for those words. If
3819      * `false`, no word-level time offset information is returned. The default is
3820      * `false`.
3821      * </pre>
3822      *
3823      * <code>bool enable_word_time_offsets = 8;</code>
3824      *
3825      * @return This builder for chaining.
3826      */
3827     public Builder clearEnableWordTimeOffsets() {
3828       bitField0_ = (bitField0_ & ~0x00000400);
3829       enableWordTimeOffsets_ = false;
3830       onChanged();
3831       return this;
3832     }
3833 
3834     private boolean enableWordConfidence_;
3835     /**
3836      *
3837      *
3838      * <pre>
3839      * If `true`, the top result includes a list of words and the
3840      * confidence for those words. If `false`, no word-level confidence
3841      * information is returned. The default is `false`.
3842      * </pre>
3843      *
3844      * <code>bool enable_word_confidence = 15;</code>
3845      *
3846      * @return The enableWordConfidence.
3847      */
3848     @java.lang.Override
3849     public boolean getEnableWordConfidence() {
3850       return enableWordConfidence_;
3851     }
3852     /**
3853      *
3854      *
3855      * <pre>
3856      * If `true`, the top result includes a list of words and the
3857      * confidence for those words. If `false`, no word-level confidence
3858      * information is returned. The default is `false`.
3859      * </pre>
3860      *
3861      * <code>bool enable_word_confidence = 15;</code>
3862      *
3863      * @param value The enableWordConfidence to set.
3864      * @return This builder for chaining.
3865      */
3866     public Builder setEnableWordConfidence(boolean value) {
3867 
3868       enableWordConfidence_ = value;
3869       bitField0_ |= 0x00000800;
3870       onChanged();
3871       return this;
3872     }
3873     /**
3874      *
3875      *
3876      * <pre>
3877      * If `true`, the top result includes a list of words and the
3878      * confidence for those words. If `false`, no word-level confidence
3879      * information is returned. The default is `false`.
3880      * </pre>
3881      *
3882      * <code>bool enable_word_confidence = 15;</code>
3883      *
3884      * @return This builder for chaining.
3885      */
3886     public Builder clearEnableWordConfidence() {
3887       bitField0_ = (bitField0_ & ~0x00000800);
3888       enableWordConfidence_ = false;
3889       onChanged();
3890       return this;
3891     }
3892 
3893     private boolean enableAutomaticPunctuation_;
3894     /**
3895      *
3896      *
3897      * <pre>
3898      * If 'true', adds punctuation to recognition result hypotheses.
3899      * This feature is only available in select languages. Setting this for
3900      * requests in other languages has no effect at all.
3901      * The default 'false' value does not add punctuation to result hypotheses.
3902      * </pre>
3903      *
3904      * <code>bool enable_automatic_punctuation = 11;</code>
3905      *
3906      * @return The enableAutomaticPunctuation.
3907      */
3908     @java.lang.Override
3909     public boolean getEnableAutomaticPunctuation() {
3910       return enableAutomaticPunctuation_;
3911     }
3912     /**
3913      *
3914      *
3915      * <pre>
3916      * If 'true', adds punctuation to recognition result hypotheses.
3917      * This feature is only available in select languages. Setting this for
3918      * requests in other languages has no effect at all.
3919      * The default 'false' value does not add punctuation to result hypotheses.
3920      * </pre>
3921      *
3922      * <code>bool enable_automatic_punctuation = 11;</code>
3923      *
3924      * @param value The enableAutomaticPunctuation to set.
3925      * @return This builder for chaining.
3926      */
3927     public Builder setEnableAutomaticPunctuation(boolean value) {
3928 
3929       enableAutomaticPunctuation_ = value;
3930       bitField0_ |= 0x00001000;
3931       onChanged();
3932       return this;
3933     }
3934     /**
3935      *
3936      *
3937      * <pre>
3938      * If 'true', adds punctuation to recognition result hypotheses.
3939      * This feature is only available in select languages. Setting this for
3940      * requests in other languages has no effect at all.
3941      * The default 'false' value does not add punctuation to result hypotheses.
3942      * </pre>
3943      *
3944      * <code>bool enable_automatic_punctuation = 11;</code>
3945      *
3946      * @return This builder for chaining.
3947      */
3948     public Builder clearEnableAutomaticPunctuation() {
3949       bitField0_ = (bitField0_ & ~0x00001000);
3950       enableAutomaticPunctuation_ = false;
3951       onChanged();
3952       return this;
3953     }
3954 
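    // Usage sketch (illustrative; not part of the generated source),
    // continuing the configBuilder example above: the word-level and
    // punctuation flags are plain booleans that default to false, so only
    // the features that are needed have to be switched on.
    //
    //   configBuilder
    //       .setEnableWordTimeOffsets(true)
    //       .setEnableWordConfidence(true)
    //       .setEnableAutomaticPunctuation(true);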
3955     private com.google.protobuf.BoolValue enableSpokenPunctuation_;
3956     private com.google.protobuf.SingleFieldBuilderV3<
3957             com.google.protobuf.BoolValue,
3958             com.google.protobuf.BoolValue.Builder,
3959             com.google.protobuf.BoolValueOrBuilder>
3960         enableSpokenPunctuationBuilder_;
3961     /**
3962      *
3963      *
3964      * <pre>
3965      * The spoken punctuation behavior for the call
3966      * If not set, uses default behavior based on model of choice
3967      * e.g. command_and_search will enable spoken punctuation by default
3968      * If 'true', replaces spoken punctuation with the corresponding symbols in
3969      * the request. For example, "how are you question mark" becomes "how are
3970      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
3971      * for support. If 'false', spoken punctuation is not replaced.
3972      * </pre>
3973      *
3974      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
3975      *
3976      * @return Whether the enableSpokenPunctuation field is set.
3977      */
3978     public boolean hasEnableSpokenPunctuation() {
3979       return ((bitField0_ & 0x00002000) != 0);
3980     }
3981     /**
3982      *
3983      *
3984      * <pre>
3985      * The spoken punctuation behavior for the call
3986      * If not set, uses default behavior based on model of choice
3987      * e.g. command_and_search will enable spoken punctuation by default
3988      * If 'true', replaces spoken punctuation with the corresponding symbols in
3989      * the request. For example, "how are you question mark" becomes "how are
3990      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
3991      * for support. If 'false', spoken punctuation is not replaced.
3992      * </pre>
3993      *
3994      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
3995      *
3996      * @return The enableSpokenPunctuation.
3997      */
3998     public com.google.protobuf.BoolValue getEnableSpokenPunctuation() {
3999       if (enableSpokenPunctuationBuilder_ == null) {
4000         return enableSpokenPunctuation_ == null
4001             ? com.google.protobuf.BoolValue.getDefaultInstance()
4002             : enableSpokenPunctuation_;
4003       } else {
4004         return enableSpokenPunctuationBuilder_.getMessage();
4005       }
4006     }
4007     /**
4008      *
4009      *
4010      * <pre>
4011      * The spoken punctuation behavior for the call
4012      * If not set, uses default behavior based on model of choice
4013      * e.g. command_and_search will enable spoken punctuation by default
4014      * If 'true', replaces spoken punctuation with the corresponding symbols in
4015      * the request. For example, "how are you question mark" becomes "how are
4016      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
4017      * for support. If 'false', spoken punctuation is not replaced.
4018      * </pre>
4019      *
4020      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
4021      */
4022     public Builder setEnableSpokenPunctuation(com.google.protobuf.BoolValue value) {
4023       if (enableSpokenPunctuationBuilder_ == null) {
4024         if (value == null) {
4025           throw new NullPointerException();
4026         }
4027         enableSpokenPunctuation_ = value;
4028       } else {
4029         enableSpokenPunctuationBuilder_.setMessage(value);
4030       }
4031       bitField0_ |= 0x00002000;
4032       onChanged();
4033       return this;
4034     }
4035     /**
4036      *
4037      *
4038      * <pre>
4039      * The spoken punctuation behavior for the call
4040      * If not set, uses default behavior based on model of choice
4041      * e.g. command_and_search will enable spoken punctuation by default
4042      * If 'true', replaces spoken punctuation with the corresponding symbols in
4043      * the request. For example, "how are you question mark" becomes "how are
4044      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
4045      * for support. If 'false', spoken punctuation is not replaced.
4046      * </pre>
4047      *
4048      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
4049      */
4050     public Builder setEnableSpokenPunctuation(
4051         com.google.protobuf.BoolValue.Builder builderForValue) {
4052       if (enableSpokenPunctuationBuilder_ == null) {
4053         enableSpokenPunctuation_ = builderForValue.build();
4054       } else {
4055         enableSpokenPunctuationBuilder_.setMessage(builderForValue.build());
4056       }
4057       bitField0_ |= 0x00002000;
4058       onChanged();
4059       return this;
4060     }
4061     /**
4062      *
4063      *
4064      * <pre>
4065      * The spoken punctuation behavior for the call
4066      * If not set, uses default behavior based on model of choice
4067      * e.g. command_and_search will enable spoken punctuation by default
4068      * If 'true', replaces spoken punctuation with the corresponding symbols in
4069      * the request. For example, "how are you question mark" becomes "how are
4070      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
4071      * for support. If 'false', spoken punctuation is not replaced.
4072      * </pre>
4073      *
4074      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
4075      */
4076     public Builder mergeEnableSpokenPunctuation(com.google.protobuf.BoolValue value) {
4077       if (enableSpokenPunctuationBuilder_ == null) {
4078         if (((bitField0_ & 0x00002000) != 0)
4079             && enableSpokenPunctuation_ != null
4080             && enableSpokenPunctuation_ != com.google.protobuf.BoolValue.getDefaultInstance()) {
4081           getEnableSpokenPunctuationBuilder().mergeFrom(value);
4082         } else {
4083           enableSpokenPunctuation_ = value;
4084         }
4085       } else {
4086         enableSpokenPunctuationBuilder_.mergeFrom(value);
4087       }
4088       bitField0_ |= 0x00002000;
4089       onChanged();
4090       return this;
4091     }
4092     /**
4093      *
4094      *
4095      * <pre>
4096      * The spoken punctuation behavior for the call
4097      * If not set, uses default behavior based on model of choice
4098      * e.g. command_and_search will enable spoken punctuation by default
4099      * If 'true', replaces spoken punctuation with the corresponding symbols in
4100      * the request. For example, "how are you question mark" becomes "how are
4101      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
4102      * for support. If 'false', spoken punctuation is not replaced.
4103      * </pre>
4104      *
4105      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
4106      */
4107     public Builder clearEnableSpokenPunctuation() {
4108       bitField0_ = (bitField0_ & ~0x00002000);
4109       enableSpokenPunctuation_ = null;
4110       if (enableSpokenPunctuationBuilder_ != null) {
4111         enableSpokenPunctuationBuilder_.dispose();
4112         enableSpokenPunctuationBuilder_ = null;
4113       }
4114       onChanged();
4115       return this;
4116     }
4117     /**
4118      *
4119      *
4120      * <pre>
4121      * The spoken punctuation behavior for the call
4122      * If not set, uses default behavior based on model of choice
4123      * e.g. command_and_search will enable spoken punctuation by default
4124      * If 'true', replaces spoken punctuation with the corresponding symbols in
4125      * the request. For example, "how are you question mark" becomes "how are
4126      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
4127      * for support. If 'false', spoken punctuation is not replaced.
4128      * </pre>
4129      *
4130      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
4131      */
4132     public com.google.protobuf.BoolValue.Builder getEnableSpokenPunctuationBuilder() {
4133       bitField0_ |= 0x00002000;
4134       onChanged();
4135       return getEnableSpokenPunctuationFieldBuilder().getBuilder();
4136     }
4137     /**
4138      *
4139      *
4140      * <pre>
4141      * The spoken punctuation behavior for the call
4142      * If not set, uses default behavior based on model of choice
4143      * e.g. command_and_search will enable spoken punctuation by default
4144      * If 'true', replaces spoken punctuation with the corresponding symbols in
4145      * the request. For example, "how are you question mark" becomes "how are
4146      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
4147      * for support. If 'false', spoken punctuation is not replaced.
4148      * </pre>
4149      *
4150      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
4151      */
4152     public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() {
4153       if (enableSpokenPunctuationBuilder_ != null) {
4154         return enableSpokenPunctuationBuilder_.getMessageOrBuilder();
4155       } else {
4156         return enableSpokenPunctuation_ == null
4157             ? com.google.protobuf.BoolValue.getDefaultInstance()
4158             : enableSpokenPunctuation_;
4159       }
4160     }
4161     /**
4162      *
4163      *
4164      * <pre>
4165      * The spoken punctuation behavior for the call
4166      * If not set, uses default behavior based on model of choice
4167      * e.g. command_and_search will enable spoken punctuation by default
4168      * If 'true', replaces spoken punctuation with the corresponding symbols in
4169      * the request. For example, "how are you question mark" becomes "how are
4170      * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
4171      * for support. If 'false', spoken punctuation is not replaced.
4172      * </pre>
4173      *
4174      * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
4175      */
4176     private com.google.protobuf.SingleFieldBuilderV3<
4177             com.google.protobuf.BoolValue,
4178             com.google.protobuf.BoolValue.Builder,
4179             com.google.protobuf.BoolValueOrBuilder>
4180         getEnableSpokenPunctuationFieldBuilder() {
4181       if (enableSpokenPunctuationBuilder_ == null) {
4182         enableSpokenPunctuationBuilder_ =
4183             new com.google.protobuf.SingleFieldBuilderV3<
4184                 com.google.protobuf.BoolValue,
4185                 com.google.protobuf.BoolValue.Builder,
4186                 com.google.protobuf.BoolValueOrBuilder>(
4187                 getEnableSpokenPunctuation(), getParentForChildren(), isClean());
4188         enableSpokenPunctuation_ = null;
4189       }
4190       return enableSpokenPunctuationBuilder_;
4191     }
4192 
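    /*
     * Illustrative sketch (not part of the generated code): enabling spoken
     * punctuation on a request via this builder. BoolValue.of is the standard
     * protobuf wrapper factory; the language code shown is an arbitrary example.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setLanguageCode("en-US")
     *           .setEnableSpokenPunctuation(com.google.protobuf.BoolValue.of(true))
     *           .build();
     */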
4193     private com.google.protobuf.BoolValue enableSpokenEmojis_;
4194     private com.google.protobuf.SingleFieldBuilderV3<
4195             com.google.protobuf.BoolValue,
4196             com.google.protobuf.BoolValue.Builder,
4197             com.google.protobuf.BoolValueOrBuilder>
4198         enableSpokenEmojisBuilder_;
4199     /**
4200      *
4201      *
4202      * <pre>
4203      * The spoken emoji behavior for the call
4204      * If not set, uses default behavior based on model of choice
4205      * If 'true', adds spoken emoji formatting for the request. This will replace
4206      * spoken emojis with the corresponding Unicode symbols in the final
4207      * transcript. If 'false', spoken emojis are not replaced.
4208      * </pre>
4209      *
4210      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4211      *
4212      * @return Whether the enableSpokenEmojis field is set.
4213      */
4214     public boolean hasEnableSpokenEmojis() {
4215       return ((bitField0_ & 0x00004000) != 0);
4216     }
4217     /**
4218      *
4219      *
4220      * <pre>
4221      * The spoken emoji behavior for the call
4222      * If not set, uses default behavior based on model of choice
4223      * If 'true', adds spoken emoji formatting for the request. This will replace
4224      * spoken emojis with the corresponding Unicode symbols in the final
4225      * transcript. If 'false', spoken emojis are not replaced.
4226      * </pre>
4227      *
4228      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4229      *
4230      * @return The enableSpokenEmojis.
4231      */
4232     public com.google.protobuf.BoolValue getEnableSpokenEmojis() {
4233       if (enableSpokenEmojisBuilder_ == null) {
4234         return enableSpokenEmojis_ == null
4235             ? com.google.protobuf.BoolValue.getDefaultInstance()
4236             : enableSpokenEmojis_;
4237       } else {
4238         return enableSpokenEmojisBuilder_.getMessage();
4239       }
4240     }
4241     /**
4242      *
4243      *
4244      * <pre>
4245      * The spoken emoji behavior for the call
4246      * If not set, uses default behavior based on model of choice
4247      * If 'true', adds spoken emoji formatting for the request. This will replace
4248      * spoken emojis with the corresponding Unicode symbols in the final
4249      * transcript. If 'false', spoken emojis are not replaced.
4250      * </pre>
4251      *
4252      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4253      */
4254     public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue value) {
4255       if (enableSpokenEmojisBuilder_ == null) {
4256         if (value == null) {
4257           throw new NullPointerException();
4258         }
4259         enableSpokenEmojis_ = value;
4260       } else {
4261         enableSpokenEmojisBuilder_.setMessage(value);
4262       }
4263       bitField0_ |= 0x00004000;
4264       onChanged();
4265       return this;
4266     }
4267     /**
4268      *
4269      *
4270      * <pre>
4271      * The spoken emoji behavior for the call
4272      * If not set, uses default behavior based on model of choice
4273      * If 'true', adds spoken emoji formatting for the request. This will replace
4274      * spoken emojis with the corresponding Unicode symbols in the final
4275      * transcript. If 'false', spoken emojis are not replaced.
4276      * </pre>
4277      *
4278      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4279      */
4280     public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue.Builder builderForValue) {
4281       if (enableSpokenEmojisBuilder_ == null) {
4282         enableSpokenEmojis_ = builderForValue.build();
4283       } else {
4284         enableSpokenEmojisBuilder_.setMessage(builderForValue.build());
4285       }
4286       bitField0_ |= 0x00004000;
4287       onChanged();
4288       return this;
4289     }
4290     /**
4291      *
4292      *
4293      * <pre>
4294      * The spoken emoji behavior for the call
4295      * If not set, uses default behavior based on model of choice
4296      * If 'true', adds spoken emoji formatting for the request. This will replace
4297      * spoken emojis with the corresponding Unicode symbols in the final
4298      * transcript. If 'false', spoken emojis are not replaced.
4299      * </pre>
4300      *
4301      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4302      */
4303     public Builder mergeEnableSpokenEmojis(com.google.protobuf.BoolValue value) {
4304       if (enableSpokenEmojisBuilder_ == null) {
4305         if (((bitField0_ & 0x00004000) != 0)
4306             && enableSpokenEmojis_ != null
4307             && enableSpokenEmojis_ != com.google.protobuf.BoolValue.getDefaultInstance()) {
4308           getEnableSpokenEmojisBuilder().mergeFrom(value);
4309         } else {
4310           enableSpokenEmojis_ = value;
4311         }
4312       } else {
4313         enableSpokenEmojisBuilder_.mergeFrom(value);
4314       }
4315       bitField0_ |= 0x00004000;
4316       onChanged();
4317       return this;
4318     }
4319     /**
4320      *
4321      *
4322      * <pre>
4323      * The spoken emoji behavior for the call
4324      * If not set, uses default behavior based on model of choice
4325      * If 'true', adds spoken emoji formatting for the request. This will replace
4326      * spoken emojis with the corresponding Unicode symbols in the final
4327      * transcript. If 'false', spoken emojis are not replaced.
4328      * </pre>
4329      *
4330      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4331      */
4332     public Builder clearEnableSpokenEmojis() {
4333       bitField0_ = (bitField0_ & ~0x00004000);
4334       enableSpokenEmojis_ = null;
4335       if (enableSpokenEmojisBuilder_ != null) {
4336         enableSpokenEmojisBuilder_.dispose();
4337         enableSpokenEmojisBuilder_ = null;
4338       }
4339       onChanged();
4340       return this;
4341     }
4342     /**
4343      *
4344      *
4345      * <pre>
4346      * The spoken emoji behavior for the call
4347      * If not set, uses default behavior based on model of choice
4348      * If 'true', adds spoken emoji formatting for the request. This will replace
4349      * spoken emojis with the corresponding Unicode symbols in the final
4350      * transcript. If 'false', spoken emojis are not replaced.
4351      * </pre>
4352      *
4353      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4354      */
4355     public com.google.protobuf.BoolValue.Builder getEnableSpokenEmojisBuilder() {
4356       bitField0_ |= 0x00004000;
4357       onChanged();
4358       return getEnableSpokenEmojisFieldBuilder().getBuilder();
4359     }
4360     /**
4361      *
4362      *
4363      * <pre>
4364      * The spoken emoji behavior for the call
4365      * If not set, uses default behavior based on model of choice
4366      * If 'true', adds spoken emoji formatting for the request. This will replace
4367      * spoken emojis with the corresponding Unicode symbols in the final
4368      * transcript. If 'false', spoken emojis are not replaced.
4369      * </pre>
4370      *
4371      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4372      */
4373     public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() {
4374       if (enableSpokenEmojisBuilder_ != null) {
4375         return enableSpokenEmojisBuilder_.getMessageOrBuilder();
4376       } else {
4377         return enableSpokenEmojis_ == null
4378             ? com.google.protobuf.BoolValue.getDefaultInstance()
4379             : enableSpokenEmojis_;
4380       }
4381     }
4382     /**
4383      *
4384      *
4385      * <pre>
4386      * The spoken emoji behavior for the call
4387      * If not set, uses default behavior based on model of choice
4388      * If 'true', adds spoken emoji formatting for the request. This will replace
4389      * spoken emojis with the corresponding Unicode symbols in the final
4390      * transcript. If 'false', spoken emojis are not replaced.
4391      * </pre>
4392      *
4393      * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
4394      */
4395     private com.google.protobuf.SingleFieldBuilderV3<
4396             com.google.protobuf.BoolValue,
4397             com.google.protobuf.BoolValue.Builder,
4398             com.google.protobuf.BoolValueOrBuilder>
4399         getEnableSpokenEmojisFieldBuilder() {
4400       if (enableSpokenEmojisBuilder_ == null) {
4401         enableSpokenEmojisBuilder_ =
4402             new com.google.protobuf.SingleFieldBuilderV3<
4403                 com.google.protobuf.BoolValue,
4404                 com.google.protobuf.BoolValue.Builder,
4405                 com.google.protobuf.BoolValueOrBuilder>(
4406                 getEnableSpokenEmojis(), getParentForChildren(), isClean());
4407         enableSpokenEmojis_ = null;
4408       }
4409       return enableSpokenEmojisBuilder_;
4410     }
4411 
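    /*
     * Illustrative sketch (not part of the generated code): because
     * enable_spoken_emojis is a BoolValue wrapper rather than a plain bool, a
     * caller can distinguish "unset" from "false" on the built message.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setEnableSpokenEmojis(com.google.protobuf.BoolValue.of(true))
     *           .build();
     *   boolean explicitlySet = config.hasEnableSpokenEmojis();
     *   boolean enabled = config.getEnableSpokenEmojis().getValue();
     */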
4412     private com.google.cloud.speech.v1.SpeakerDiarizationConfig diarizationConfig_;
4413     private com.google.protobuf.SingleFieldBuilderV3<
4414             com.google.cloud.speech.v1.SpeakerDiarizationConfig,
4415             com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder,
4416             com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder>
4417         diarizationConfigBuilder_;
4418     /**
4419      *
4420      *
4421      * <pre>
4422      * Config to enable speaker diarization and set additional
4423      * parameters to make diarization better suited for your application.
4424      * Note: When this is enabled, we send all the words from the beginning of the
4425      * audio for the top alternative in every consecutive STREAMING response.
4426      * This is done in order to improve our speaker tags as our models learn to
4427      * identify the speakers in the conversation over time.
4428      * For non-streaming requests, the diarization results will be provided only
4429      * in the top alternative of the FINAL SpeechRecognitionResult.
4430      * </pre>
4431      *
4432      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4433      *
4434      * @return Whether the diarizationConfig field is set.
4435      */
4436     public boolean hasDiarizationConfig() {
4437       return ((bitField0_ & 0x00008000) != 0);
4438     }
4439     /**
4440      *
4441      *
4442      * <pre>
4443      * Config to enable speaker diarization and set additional
4444      * parameters to make diarization better suited for your application.
4445      * Note: When this is enabled, we send all the words from the beginning of the
4446      * audio for the top alternative in every consecutive STREAMING response.
4447      * This is done in order to improve our speaker tags as our models learn to
4448      * identify the speakers in the conversation over time.
4449      * For non-streaming requests, the diarization results will be provided only
4450      * in the top alternative of the FINAL SpeechRecognitionResult.
4451      * </pre>
4452      *
4453      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4454      *
4455      * @return The diarizationConfig.
4456      */
4457     public com.google.cloud.speech.v1.SpeakerDiarizationConfig getDiarizationConfig() {
4458       if (diarizationConfigBuilder_ == null) {
4459         return diarizationConfig_ == null
4460             ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()
4461             : diarizationConfig_;
4462       } else {
4463         return diarizationConfigBuilder_.getMessage();
4464       }
4465     }
4466     /**
4467      *
4468      *
4469      * <pre>
4470      * Config to enable speaker diarization and set additional
4471      * parameters to make diarization better suited for your application.
4472      * Note: When this is enabled, we send all the words from the beginning of the
4473      * audio for the top alternative in every consecutive STREAMING response.
4474      * This is done in order to improve our speaker tags as our models learn to
4475      * identify the speakers in the conversation over time.
4476      * For non-streaming requests, the diarization results will be provided only
4477      * in the top alternative of the FINAL SpeechRecognitionResult.
4478      * </pre>
4479      *
4480      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4481      */
4482     public Builder setDiarizationConfig(com.google.cloud.speech.v1.SpeakerDiarizationConfig value) {
4483       if (diarizationConfigBuilder_ == null) {
4484         if (value == null) {
4485           throw new NullPointerException();
4486         }
4487         diarizationConfig_ = value;
4488       } else {
4489         diarizationConfigBuilder_.setMessage(value);
4490       }
4491       bitField0_ |= 0x00008000;
4492       onChanged();
4493       return this;
4494     }
4495     /**
4496      *
4497      *
4498      * <pre>
4499      * Config to enable speaker diarization and set additional
4500      * parameters to make diarization better suited for your application.
4501      * Note: When this is enabled, we send all the words from the beginning of the
4502      * audio for the top alternative in every consecutive STREAMING response.
4503      * This is done in order to improve our speaker tags as our models learn to
4504      * identify the speakers in the conversation over time.
4505      * For non-streaming requests, the diarization results will be provided only
4506      * in the top alternative of the FINAL SpeechRecognitionResult.
4507      * </pre>
4508      *
4509      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4510      */
4511     public Builder setDiarizationConfig(
4512         com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder builderForValue) {
4513       if (diarizationConfigBuilder_ == null) {
4514         diarizationConfig_ = builderForValue.build();
4515       } else {
4516         diarizationConfigBuilder_.setMessage(builderForValue.build());
4517       }
4518       bitField0_ |= 0x00008000;
4519       onChanged();
4520       return this;
4521     }
4522     /**
4523      *
4524      *
4525      * <pre>
4526      * Config to enable speaker diarization and set additional
4527      * parameters to make diarization better suited for your application.
4528      * Note: When this is enabled, we send all the words from the beginning of the
4529      * audio for the top alternative in every consecutive STREAMING response.
4530      * This is done in order to improve our speaker tags as our models learn to
4531      * identify the speakers in the conversation over time.
4532      * For non-streaming requests, the diarization results will be provided only
4533      * in the top alternative of the FINAL SpeechRecognitionResult.
4534      * </pre>
4535      *
4536      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4537      */
4538     public Builder mergeDiarizationConfig(
4539         com.google.cloud.speech.v1.SpeakerDiarizationConfig value) {
4540       if (diarizationConfigBuilder_ == null) {
4541         if (((bitField0_ & 0x00008000) != 0)
4542             && diarizationConfig_ != null
4543             && diarizationConfig_
4544                 != com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()) {
4545           getDiarizationConfigBuilder().mergeFrom(value);
4546         } else {
4547           diarizationConfig_ = value;
4548         }
4549       } else {
4550         diarizationConfigBuilder_.mergeFrom(value);
4551       }
4552       bitField0_ |= 0x00008000;
4553       onChanged();
4554       return this;
4555     }
4556     /**
4557      *
4558      *
4559      * <pre>
4560      * Config to enable speaker diarization and set additional
4561      * parameters to make diarization better suited for your application.
4562      * Note: When this is enabled, we send all the words from the beginning of the
4563      * audio for the top alternative in every consecutive STREAMING response.
4564      * This is done in order to improve our speaker tags as our models learn to
4565      * identify the speakers in the conversation over time.
4566      * For non-streaming requests, the diarization results will be provided only
4567      * in the top alternative of the FINAL SpeechRecognitionResult.
4568      * </pre>
4569      *
4570      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4571      */
4572     public Builder clearDiarizationConfig() {
4573       bitField0_ = (bitField0_ & ~0x00008000);
4574       diarizationConfig_ = null;
4575       if (diarizationConfigBuilder_ != null) {
4576         diarizationConfigBuilder_.dispose();
4577         diarizationConfigBuilder_ = null;
4578       }
4579       onChanged();
4580       return this;
4581     }
4582     /**
4583      *
4584      *
4585      * <pre>
4586      * Config to enable speaker diarization and set additional
4587      * parameters to make diarization better suited for your application.
4588      * Note: When this is enabled, we send all the words from the beginning of the
4589      * audio for the top alternative in every consecutive STREAMING response.
4590      * This is done in order to improve our speaker tags as our models learn to
4591      * identify the speakers in the conversation over time.
4592      * For non-streaming requests, the diarization results will be provided only
4593      * in the top alternative of the FINAL SpeechRecognitionResult.
4594      * </pre>
4595      *
4596      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4597      */
4598     public com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder
4599         getDiarizationConfigBuilder() {
4600       bitField0_ |= 0x00008000;
4601       onChanged();
4602       return getDiarizationConfigFieldBuilder().getBuilder();
4603     }
4604     /**
4605      *
4606      *
4607      * <pre>
4608      * Config to enable speaker diarization and set additional
4609      * parameters to make diarization better suited for your application.
4610      * Note: When this is enabled, we send all the words from the beginning of the
4611      * audio for the top alternative in every consecutive STREAMING response.
4612      * This is done in order to improve our speaker tags as our models learn to
4613      * identify the speakers in the conversation over time.
4614      * For non-streaming requests, the diarization results will be provided only
4615      * in the top alternative of the FINAL SpeechRecognitionResult.
4616      * </pre>
4617      *
4618      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4619      */
4620     public com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder
4621         getDiarizationConfigOrBuilder() {
4622       if (diarizationConfigBuilder_ != null) {
4623         return diarizationConfigBuilder_.getMessageOrBuilder();
4624       } else {
4625         return diarizationConfig_ == null
4626             ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()
4627             : diarizationConfig_;
4628       }
4629     }
4630     /**
4631      *
4632      *
4633      * <pre>
4634      * Config to enable speaker diarization and set additional
4635      * parameters to make diarization better suited for your application.
4636      * Note: When this is enabled, we send all the words from the beginning of the
4637      * audio for the top alternative in every consecutive STREAMING response.
4638      * This is done in order to improve our speaker tags as our models learn to
4639      * identify the speakers in the conversation over time.
4640      * For non-streaming requests, the diarization results will be provided only
4641      * in the top alternative of the FINAL SpeechRecognitionResult.
4642      * </pre>
4643      *
4644      * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
4645      */
4646     private com.google.protobuf.SingleFieldBuilderV3<
4647             com.google.cloud.speech.v1.SpeakerDiarizationConfig,
4648             com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder,
4649             com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder>
4650         getDiarizationConfigFieldBuilder() {
4651       if (diarizationConfigBuilder_ == null) {
4652         diarizationConfigBuilder_ =
4653             new com.google.protobuf.SingleFieldBuilderV3<
4654                 com.google.cloud.speech.v1.SpeakerDiarizationConfig,
4655                 com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder,
4656                 com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder>(
4657                 getDiarizationConfig(), getParentForChildren(), isClean());
4658         diarizationConfig_ = null;
4659       }
4660       return diarizationConfigBuilder_;
4661     }
4662 
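    /*
     * Illustrative sketch (not part of the generated code): attaching a speaker
     * diarization config. The SpeakerDiarizationConfig accessors below
     * (setEnableSpeakerDiarization, setMinSpeakerCount, setMaxSpeakerCount) are
     * assumed from that message's proto definition; the speaker counts are
     * arbitrary example values.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setDiarizationConfig(
     *               com.google.cloud.speech.v1.SpeakerDiarizationConfig.newBuilder()
     *                   .setEnableSpeakerDiarization(true)
     *                   .setMinSpeakerCount(2)
     *                   .setMaxSpeakerCount(4)
     *                   .build())
     *           .build();
     */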
4663     private com.google.cloud.speech.v1.RecognitionMetadata metadata_;
4664     private com.google.protobuf.SingleFieldBuilderV3<
4665             com.google.cloud.speech.v1.RecognitionMetadata,
4666             com.google.cloud.speech.v1.RecognitionMetadata.Builder,
4667             com.google.cloud.speech.v1.RecognitionMetadataOrBuilder>
4668         metadataBuilder_;
4669     /**
4670      *
4671      *
4672      * <pre>
4673      * Metadata regarding this request.
4674      * </pre>
4675      *
4676      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4677      *
4678      * @return Whether the metadata field is set.
4679      */
4680     public boolean hasMetadata() {
4681       return ((bitField0_ & 0x00010000) != 0);
4682     }
4683     /**
4684      *
4685      *
4686      * <pre>
4687      * Metadata regarding this request.
4688      * </pre>
4689      *
4690      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4691      *
4692      * @return The metadata.
4693      */
4694     public com.google.cloud.speech.v1.RecognitionMetadata getMetadata() {
4695       if (metadataBuilder_ == null) {
4696         return metadata_ == null
4697             ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()
4698             : metadata_;
4699       } else {
4700         return metadataBuilder_.getMessage();
4701       }
4702     }
4703     /**
4704      *
4705      *
4706      * <pre>
4707      * Metadata regarding this request.
4708      * </pre>
4709      *
4710      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4711      */
4712     public Builder setMetadata(com.google.cloud.speech.v1.RecognitionMetadata value) {
4713       if (metadataBuilder_ == null) {
4714         if (value == null) {
4715           throw new NullPointerException();
4716         }
4717         metadata_ = value;
4718       } else {
4719         metadataBuilder_.setMessage(value);
4720       }
4721       bitField0_ |= 0x00010000;
4722       onChanged();
4723       return this;
4724     }
4725     /**
4726      *
4727      *
4728      * <pre>
4729      * Metadata regarding this request.
4730      * </pre>
4731      *
4732      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4733      */
4734     public Builder setMetadata(
4735         com.google.cloud.speech.v1.RecognitionMetadata.Builder builderForValue) {
4736       if (metadataBuilder_ == null) {
4737         metadata_ = builderForValue.build();
4738       } else {
4739         metadataBuilder_.setMessage(builderForValue.build());
4740       }
4741       bitField0_ |= 0x00010000;
4742       onChanged();
4743       return this;
4744     }
4745     /**
4746      *
4747      *
4748      * <pre>
4749      * Metadata regarding this request.
4750      * </pre>
4751      *
4752      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4753      */
4754     public Builder mergeMetadata(com.google.cloud.speech.v1.RecognitionMetadata value) {
4755       if (metadataBuilder_ == null) {
4756         if (((bitField0_ & 0x00010000) != 0)
4757             && metadata_ != null
4758             && metadata_ != com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()) {
4759           getMetadataBuilder().mergeFrom(value);
4760         } else {
4761           metadata_ = value;
4762         }
4763       } else {
4764         metadataBuilder_.mergeFrom(value);
4765       }
4766       bitField0_ |= 0x00010000;
4767       onChanged();
4768       return this;
4769     }
4770     /**
4771      *
4772      *
4773      * <pre>
4774      * Metadata regarding this request.
4775      * </pre>
4776      *
4777      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4778      */
4779     public Builder clearMetadata() {
4780       bitField0_ = (bitField0_ & ~0x00010000);
4781       metadata_ = null;
4782       if (metadataBuilder_ != null) {
4783         metadataBuilder_.dispose();
4784         metadataBuilder_ = null;
4785       }
4786       onChanged();
4787       return this;
4788     }
4789     /**
4790      *
4791      *
4792      * <pre>
4793      * Metadata regarding this request.
4794      * </pre>
4795      *
4796      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4797      */
4798     public com.google.cloud.speech.v1.RecognitionMetadata.Builder getMetadataBuilder() {
4799       bitField0_ |= 0x00010000;
4800       onChanged();
4801       return getMetadataFieldBuilder().getBuilder();
4802     }
4803     /**
4804      *
4805      *
4806      * <pre>
4807      * Metadata regarding this request.
4808      * </pre>
4809      *
4810      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4811      */
4812     public com.google.cloud.speech.v1.RecognitionMetadataOrBuilder getMetadataOrBuilder() {
4813       if (metadataBuilder_ != null) {
4814         return metadataBuilder_.getMessageOrBuilder();
4815       } else {
4816         return metadata_ == null
4817             ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()
4818             : metadata_;
4819       }
4820     }
4821     /**
4822      *
4823      *
4824      * <pre>
4825      * Metadata regarding this request.
4826      * </pre>
4827      *
4828      * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
4829      */
4830     private com.google.protobuf.SingleFieldBuilderV3<
4831             com.google.cloud.speech.v1.RecognitionMetadata,
4832             com.google.cloud.speech.v1.RecognitionMetadata.Builder,
4833             com.google.cloud.speech.v1.RecognitionMetadataOrBuilder>
4834         getMetadataFieldBuilder() {
4835       if (metadataBuilder_ == null) {
4836         metadataBuilder_ =
4837             new com.google.protobuf.SingleFieldBuilderV3<
4838                 com.google.cloud.speech.v1.RecognitionMetadata,
4839                 com.google.cloud.speech.v1.RecognitionMetadata.Builder,
4840                 com.google.cloud.speech.v1.RecognitionMetadataOrBuilder>(
4841                 getMetadata(), getParentForChildren(), isClean());
4842         metadata_ = null;
4843       }
4844       return metadataBuilder_;
4845     }
4846 
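    /*
     * Illustrative sketch (not part of the generated code): supplying request
     * metadata. The setAudioTopic accessor is assumed from RecognitionMetadata's
     * audio_topic string field; the topic text is an arbitrary example.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setMetadata(
     *               com.google.cloud.speech.v1.RecognitionMetadata.newBuilder()
     *                   .setAudioTopic("customer support call")
     *                   .build())
     *           .build();
     */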
4847     private java.lang.Object model_ = "";
4848     /**
4849      *
4850      *
4851      * <pre>
4852      * Which model to select for the given request. Select the model
4853      * best suited to your domain to get best results. If a model is not
4854      * explicitly specified, then we auto-select a model based on the parameters
4855      * in the RecognitionConfig.
4856      * &lt;table&gt;
4857      *   &lt;tr&gt;
4858      *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
4859      *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
4860      *   &lt;/tr&gt;
4861      *   &lt;tr&gt;
4862      *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
4863      *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
4864      *   &lt;/tr&gt;
4865      *   &lt;tr&gt;
4866      *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
4867      *     &lt;td&gt;Best for short form content like commands or single shot directed
4868      *     speech.&lt;/td&gt;
4869      *   &lt;/tr&gt;
4870      *   &lt;tr&gt;
4871      *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
4872      *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
4873      *   &lt;/tr&gt;
4874      *   &lt;tr&gt;
4875      *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
4876      *     &lt;td&gt;Best for audio that originated from a phone call (typically
4877      *     recorded at an 8khz sampling rate).&lt;/td&gt;
4878      *   &lt;/tr&gt;
4879      *   &lt;tr&gt;
4880      *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
4881      *     &lt;td&gt;Best for audio that originated from video or includes multiple
4882      *         speakers. Ideally the audio is recorded at a 16khz or greater
4883      *         sampling rate. This is a premium model that costs more than the
4884      *         standard rate.&lt;/td&gt;
4885      *   &lt;/tr&gt;
4886      *   &lt;tr&gt;
4887      *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
4888      *     &lt;td&gt;Best for audio that is not one of the specific audio models.
4889      *         For example, long-form audio. Ideally the audio is high-fidelity,
4890      *         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
4891      *   &lt;/tr&gt;
4892      *   &lt;tr&gt;
4893      *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
4894      *     &lt;td&gt;Best for audio that originated from a conversation between a
4895      *         medical provider and patient.&lt;/td&gt;
4896      *   &lt;/tr&gt;
4897      *   &lt;tr&gt;
4898      *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
4899      *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
4900      *         provider.&lt;/td&gt;
4901      *   &lt;/tr&gt;
4902      * &lt;/table&gt;
4903      * </pre>
4904      *
4905      * <code>string model = 13;</code>
4906      *
4907      * @return The model.
4908      */
4909     public java.lang.String getModel() {
4910       java.lang.Object ref = model_;
4911       if (!(ref instanceof java.lang.String)) {
4912         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
4913         java.lang.String s = bs.toStringUtf8();
4914         model_ = s;
4915         return s;
4916       } else {
4917         return (java.lang.String) ref;
4918       }
4919     }
4920     /**
4921      *
4922      *
4923      * <pre>
4924      * Which model to select for the given request. Select the model
4925      * best suited to your domain to get best results. If a model is not
4926      * explicitly specified, then we auto-select a model based on the parameters
4927      * in the RecognitionConfig.
4928      * &lt;table&gt;
4929      *   &lt;tr&gt;
4930      *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
4931      *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
4932      *   &lt;/tr&gt;
4933      *   &lt;tr&gt;
4934      *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
4935      *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
4936      *   &lt;/tr&gt;
4937      *   &lt;tr&gt;
4938      *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
4939      *     &lt;td&gt;Best for short form content like commands or single shot directed
4940      *     speech.&lt;/td&gt;
4941      *   &lt;/tr&gt;
4942      *   &lt;tr&gt;
4943      *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
4944      *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
4945      *   &lt;/tr&gt;
4946      *   &lt;tr&gt;
4947      *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
4948      *     &lt;td&gt;Best for audio that originated from a phone call (typically
4949      *     recorded at an 8khz sampling rate).&lt;/td&gt;
4950      *   &lt;/tr&gt;
4951      *   &lt;tr&gt;
4952      *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
4953      *     &lt;td&gt;Best for audio that originated from video or includes multiple
4954      *         speakers. Ideally the audio is recorded at a 16khz or greater
4955      *         sampling rate. This is a premium model that costs more than the
4956      *         standard rate.&lt;/td&gt;
4957      *   &lt;/tr&gt;
4958      *   &lt;tr&gt;
4959      *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
4960      *     &lt;td&gt;Best for audio that is not one of the specific audio models.
4961      *         For example, long-form audio. Ideally the audio is high-fidelity,
4962      *         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
4963      *   &lt;/tr&gt;
4964      *   &lt;tr&gt;
4965      *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
4966      *     &lt;td&gt;Best for audio that originated from a conversation between a
4967      *         medical provider and patient.&lt;/td&gt;
4968      *   &lt;/tr&gt;
4969      *   &lt;tr&gt;
4970      *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
4971      *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
4972      *         provider.&lt;/td&gt;
4973      *   &lt;/tr&gt;
4974      * &lt;/table&gt;
4975      * </pre>
4976      *
4977      * <code>string model = 13;</code>
4978      *
4979      * @return The bytes for model.
4980      */
4981     public com.google.protobuf.ByteString getModelBytes() {
4982       java.lang.Object ref = model_;
4983       if (ref instanceof String) {
4984         com.google.protobuf.ByteString b =
4985             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
4986         model_ = b;
4987         return b;
4988       } else {
4989         return (com.google.protobuf.ByteString) ref;
4990       }
4991     }
4992     /**
4993      *
4994      *
4995      * <pre>
4996      * Which model to select for the given request. Select the model
4997      * best suited to your domain to get best results. If a model is not
4998      * explicitly specified, then we auto-select a model based on the parameters
4999      * in the RecognitionConfig.
5000      * &lt;table&gt;
5001      *   &lt;tr&gt;
5002      *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
5003      *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
5004      *   &lt;/tr&gt;
5005      *   &lt;tr&gt;
5006      *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
5007      *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
5008      *   &lt;/tr&gt;
5009      *   &lt;tr&gt;
5010      *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
5011      *     &lt;td&gt;Best for short form content like commands or single shot directed
5012      *     speech.&lt;/td&gt;
5013      *   &lt;/tr&gt;
5014      *   &lt;tr&gt;
5015      *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
5016      *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
5017      *   &lt;/tr&gt;
5018      *   &lt;tr&gt;
5019      *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
5020      *     &lt;td&gt;Best for audio that originated from a phone call (typically
5021      *     recorded at an 8khz sampling rate).&lt;/td&gt;
5022      *   &lt;/tr&gt;
5023      *   &lt;tr&gt;
5024      *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
5025      *     &lt;td&gt;Best for audio that originated from video or includes multiple
5026      *         speakers. Ideally the audio is recorded at a 16khz or greater
5027      *         sampling rate. This is a premium model that costs more than the
5028      *         standard rate.&lt;/td&gt;
5029      *   &lt;/tr&gt;
5030      *   &lt;tr&gt;
5031      *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
5032      *     &lt;td&gt;Best for audio that is not one of the specific audio models.
5033      *         For example, long-form audio. Ideally the audio is high-fidelity,
5034      *         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
5035      *   &lt;/tr&gt;
5036      *   &lt;tr&gt;
5037      *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
5038      *     &lt;td&gt;Best for audio that originated from a conversation between a
5039      *         medical provider and patient.&lt;/td&gt;
5040      *   &lt;/tr&gt;
5041      *   &lt;tr&gt;
5042      *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
5043      *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
5044      *         provider.&lt;/td&gt;
5045      *   &lt;/tr&gt;
5046      * &lt;/table&gt;
5047      * </pre>
5048      *
5049      * <code>string model = 13;</code>
5050      *
5051      * @param value The model to set.
5052      * @return This builder for chaining.
5053      */
5054     public Builder setModel(java.lang.String value) {
5055       if (value == null) {
5056         throw new NullPointerException();
5057       }
5058       model_ = value;
5059       bitField0_ |= 0x00020000;
5060       onChanged();
5061       return this;
5062     }
5063     /**
5064      *
5065      *
5066      * <pre>
5067      * Which model to select for the given request. Select the model
5068      * best suited to your domain to get best results. If a model is not
5069      * explicitly specified, then we auto-select a model based on the parameters
5070      * in the RecognitionConfig.
5071      * &lt;table&gt;
5072      *   &lt;tr&gt;
5073      *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
5074      *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
5075      *   &lt;/tr&gt;
5076      *   &lt;tr&gt;
5077      *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
5078      *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
5079      *   &lt;/tr&gt;
5080      *   &lt;tr&gt;
5081      *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
5082      *     &lt;td&gt;Best for short form content like commands or single shot directed
5083      *     speech.&lt;/td&gt;
5084      *   &lt;/tr&gt;
5085      *   &lt;tr&gt;
5086      *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
5087      *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
5088      *   &lt;/tr&gt;
5089      *   &lt;tr&gt;
5090      *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
5091      *     &lt;td&gt;Best for audio that originated from a phone call (typically
5092      *     recorded at an 8khz sampling rate).&lt;/td&gt;
5093      *   &lt;/tr&gt;
5094      *   &lt;tr&gt;
5095      *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
5096      *     &lt;td&gt;Best for audio that originated from video or includes multiple
5097      *         speakers. Ideally the audio is recorded at a 16khz or greater
5098      *         sampling rate. This is a premium model that costs more than the
5099      *         standard rate.&lt;/td&gt;
5100      *   &lt;/tr&gt;
5101      *   &lt;tr&gt;
5102      *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
5103      *     &lt;td&gt;Best for audio that is not one of the specific audio models.
5104      *         For example, long-form audio. Ideally the audio is high-fidelity,
5105      *         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
5106      *   &lt;/tr&gt;
5107      *   &lt;tr&gt;
5108      *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
5109      *     &lt;td&gt;Best for audio that originated from a conversation between a
5110      *         medical provider and patient.&lt;/td&gt;
5111      *   &lt;/tr&gt;
5112      *   &lt;tr&gt;
5113      *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
5114      *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
5115      *         provider.&lt;/td&gt;
5116      *   &lt;/tr&gt;
5117      * &lt;/table&gt;
5118      * </pre>
5119      *
5120      * <code>string model = 13;</code>
5121      *
5122      * @return This builder for chaining.
5123      */
5124     public Builder clearModel() {
5125       model_ = getDefaultInstance().getModel();
5126       bitField0_ = (bitField0_ & ~0x00020000);
5127       onChanged();
5128       return this;
5129     }
5130     /**
5131      *
5132      *
5133      * <pre>
5134      * Which model to select for the given request. Select the model
5135      * best suited to your domain to get best results. If a model is not
5136      * explicitly specified, then we auto-select a model based on the parameters
5137      * in the RecognitionConfig.
5138      * &lt;table&gt;
5139      *   &lt;tr&gt;
5140      *     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
5141      *     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
5142      *   &lt;/tr&gt;
5143      *   &lt;tr&gt;
5144      *     &lt;td&gt;&lt;code&gt;latest_long&lt;/code&gt;&lt;/td&gt;
5145      *     &lt;td&gt;Best for long form content like media or conversation.&lt;/td&gt;
5146      *   &lt;/tr&gt;
5147      *   &lt;tr&gt;
5148      *     &lt;td&gt;&lt;code&gt;latest_short&lt;/code&gt;&lt;/td&gt;
5149      *     &lt;td&gt;Best for short form content like commands or single shot directed
5150      *     speech.&lt;/td&gt;
5151      *   &lt;/tr&gt;
5152      *   &lt;tr&gt;
5153      *     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
5154      *     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
5155      *   &lt;/tr&gt;
5156      *   &lt;tr&gt;
5157      *     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
5158      *     &lt;td&gt;Best for audio that originated from a phone call (typically
5159      *     recorded at an 8khz sampling rate).&lt;/td&gt;
5160      *   &lt;/tr&gt;
5161      *   &lt;tr&gt;
5162      *     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
5163      *     &lt;td&gt;Best for audio that originated from video or includes multiple
5164      *         speakers. Ideally the audio is recorded at a 16khz or greater
5165      *         sampling rate. This is a premium model that costs more than the
5166      *         standard rate.&lt;/td&gt;
5167      *   &lt;/tr&gt;
5168      *   &lt;tr&gt;
5169      *     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
5170      *     &lt;td&gt;Best for audio that is not one of the specific audio models.
5171      *         For example, long-form audio. Ideally the audio is high-fidelity,
5172      *         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
5173      *   &lt;/tr&gt;
5174      *   &lt;tr&gt;
5175      *     &lt;td&gt;&lt;code&gt;medical_conversation&lt;/code&gt;&lt;/td&gt;
5176      *     &lt;td&gt;Best for audio that originated from a conversation between a
5177      *         medical provider and patient.&lt;/td&gt;
5178      *   &lt;/tr&gt;
5179      *   &lt;tr&gt;
5180      *     &lt;td&gt;&lt;code&gt;medical_dictation&lt;/code&gt;&lt;/td&gt;
5181      *     &lt;td&gt;Best for audio that originated from dictation notes by a medical
5182      *         provider.&lt;/td&gt;
5183      *   &lt;/tr&gt;
5184      * &lt;/table&gt;
5185      * </pre>
5186      *
5187      * <code>string model = 13;</code>
5188      *
5189      * @param value The bytes for model to set.
5190      * @return This builder for chaining.
5191      */
5192     public Builder setModelBytes(com.google.protobuf.ByteString value) {
5193       if (value == null) {
5194         throw new NullPointerException();
5195       }
5196       checkByteStringIsUtf8(value);
5197       model_ = value;
5198       bitField0_ |= 0x00020000;
5199       onChanged();
5200       return this;
5201     }
5202 
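    /*
     * Illustrative sketch (not part of the generated code): selecting a model by
     * name. The string should be one of the model identifiers listed in the
     * comment above (for example "latest_long" or "phone_call"); leaving the
     * field unset lets the service auto-select a model from the rest of the
     * RecognitionConfig.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setModel("latest_long")
     *           .build();
     */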
5203     private boolean useEnhanced_;
5204     /**
5205      *
5206      *
5207      * <pre>
5208      * Set to true to use an enhanced model for speech recognition.
5209      * If `use_enhanced` is set to true and the `model` field is not set, then
5210      * an appropriate enhanced model is chosen if an enhanced model exists for
5211      * the audio.
5212      * If `use_enhanced` is true and an enhanced version of the specified model
5213      * does not exist, then the speech is recognized using the standard version
5214      * of the specified model.
5215      * </pre>
5216      *
5217      * <code>bool use_enhanced = 14;</code>
5218      *
5219      * @return The useEnhanced.
5220      */
5221     @java.lang.Override
5222     public boolean getUseEnhanced() {
5223       return useEnhanced_;
5224     }
5225     /**
5226      *
5227      *
5228      * <pre>
5229      * Set to true to use an enhanced model for speech recognition.
5230      * If `use_enhanced` is set to true and the `model` field is not set, then
5231      * an appropriate enhanced model is chosen if an enhanced model exists for
5232      * the audio.
5233      * If `use_enhanced` is true and an enhanced version of the specified model
5234      * does not exist, then the speech is recognized using the standard version
5235      * of the specified model.
5236      * </pre>
5237      *
5238      * <code>bool use_enhanced = 14;</code>
5239      *
5240      * @param value The useEnhanced to set.
5241      * @return This builder for chaining.
5242      */
5243     public Builder setUseEnhanced(boolean value) {
5244 
5245       useEnhanced_ = value;
5246       bitField0_ |= 0x00040000;
5247       onChanged();
5248       return this;
5249     }
5250     /**
5251      *
5252      *
5253      * <pre>
5254      * Set to true to use an enhanced model for speech recognition.
5255      * If `use_enhanced` is set to true and the `model` field is not set, then
5256      * an appropriate enhanced model is chosen if an enhanced model exists for
5257      * the audio.
5258      * If `use_enhanced` is true and an enhanced version of the specified model
5259      * does not exist, then the speech is recognized using the standard version
5260      * of the specified model.
5261      * </pre>
5262      *
5263      * <code>bool use_enhanced = 14;</code>
5264      *
5265      * @return This builder for chaining.
5266      */
5267     public Builder clearUseEnhanced() {
5268       bitField0_ = (bitField0_ & ~0x00040000);
5269       useEnhanced_ = false;
5270       onChanged();
5271       return this;
5272     }
5273 
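    /*
     * Illustrative sketch (not part of the generated code): requesting an
     * enhanced model. Per the field comment above, use_enhanced only takes
     * effect when an enhanced variant of the explicit or auto-selected model
     * exists; otherwise the standard model is used.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setModel("phone_call")
     *           .setUseEnhanced(true)
     *           .build();
     */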
5274     @java.lang.Override
5275     public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
5276       return super.setUnknownFields(unknownFields);
5277     }
5278 
5279     @java.lang.Override
5280     public final Builder mergeUnknownFields(
5281         final com.google.protobuf.UnknownFieldSet unknownFields) {
5282       return super.mergeUnknownFields(unknownFields);
5283     }
5284 
5285     // @@protoc_insertion_point(builder_scope:google.cloud.speech.v1.RecognitionConfig)
5286   }
5287 
5288   // @@protoc_insertion_point(class_scope:google.cloud.speech.v1.RecognitionConfig)
5289   private static final com.google.cloud.speech.v1.RecognitionConfig DEFAULT_INSTANCE;
5290 
5291   static {
5292     DEFAULT_INSTANCE = new com.google.cloud.speech.v1.RecognitionConfig();
5293   }
5294 
5295   public static com.google.cloud.speech.v1.RecognitionConfig getDefaultInstance() {
5296     return DEFAULT_INSTANCE;
5297   }
5298 
5299   private static final com.google.protobuf.Parser<RecognitionConfig> PARSER =
5300       new com.google.protobuf.AbstractParser<RecognitionConfig>() {
5301         @java.lang.Override
5302         public RecognitionConfig parsePartialFrom(
5303             com.google.protobuf.CodedInputStream input,
5304             com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5305             throws com.google.protobuf.InvalidProtocolBufferException {
5306           Builder builder = newBuilder();
5307           try {
5308             builder.mergeFrom(input, extensionRegistry);
5309           } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5310             throw e.setUnfinishedMessage(builder.buildPartial());
5311           } catch (com.google.protobuf.UninitializedMessageException e) {
5312             throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
5313           } catch (java.io.IOException e) {
5314             throw new com.google.protobuf.InvalidProtocolBufferException(e)
5315                 .setUnfinishedMessage(builder.buildPartial());
5316           }
5317           return builder.buildPartial();
5318         }
5319       };
5320 
5321   public static com.google.protobuf.Parser<RecognitionConfig> parser() {
5322     return PARSER;
5323   }
5324 
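  /*
   * Illustrative sketch (not part of the generated code): round-tripping a
   * config through the generated parser. toByteArray() and Parser.parseFrom(byte[])
   * are standard protobuf runtime methods; parseFrom throws
   * InvalidProtocolBufferException on malformed input.
   *
   *   byte[] bytes =
   *       RecognitionConfig.newBuilder().setLanguageCode("en-US").build().toByteArray();
   *   RecognitionConfig parsed = RecognitionConfig.parser().parseFrom(bytes);
   */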
5325   @java.lang.Override
5326   public com.google.protobuf.Parser<RecognitionConfig> getParserForType() {
5327     return PARSER;
5328   }
5329 
5330   @java.lang.Override
5331   public com.google.cloud.speech.v1.RecognitionConfig getDefaultInstanceForType() {
5332     return DEFAULT_INSTANCE;
5333   }
5334 }
5335