1 /*
2  * Copyright 2020 Google LLC
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     https://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 // Generated by the protocol buffer compiler.  DO NOT EDIT!
17 // source: google/cloud/dialogflow/v2beta1/audio_config.proto
18 
19 package com.google.cloud.dialogflow.v2beta1;
20 
21 /**
22  *
23  *
24  * <pre>
25  * Instructs the speech recognizer on how to process the audio content.
26  * </pre>
27  *
28  * Protobuf type {@code google.cloud.dialogflow.v2beta1.InputAudioConfig}
29  */
30 public final class InputAudioConfig extends com.google.protobuf.GeneratedMessageV3
31     implements
32     // @@protoc_insertion_point(message_implements:google.cloud.dialogflow.v2beta1.InputAudioConfig)
33     InputAudioConfigOrBuilder {
34   private static final long serialVersionUID = 0L;
35   // Use InputAudioConfig.newBuilder() to construct.
36   private InputAudioConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
37     super(builder);
38   }
39 
40   private InputAudioConfig() {
41     audioEncoding_ = 0;
42     languageCode_ = "";
43     phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
44     speechContexts_ = java.util.Collections.emptyList();
45     model_ = "";
46     modelVariant_ = 0;
47   }
48 
49   @java.lang.Override
50   @SuppressWarnings({"unused"})
51   protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
52     return new InputAudioConfig();
53   }
54 
55   @java.lang.Override
56   public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
57     return this.unknownFields;
58   }
59 
60   public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
61     return com.google.cloud.dialogflow.v2beta1.AudioConfigProto
62         .internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_descriptor;
63   }
64 
65   @java.lang.Override
66   protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
67       internalGetFieldAccessorTable() {
68     return com.google.cloud.dialogflow.v2beta1.AudioConfigProto
69         .internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_fieldAccessorTable
70         .ensureFieldAccessorsInitialized(
71             com.google.cloud.dialogflow.v2beta1.InputAudioConfig.class,
72             com.google.cloud.dialogflow.v2beta1.InputAudioConfig.Builder.class);
73   }
74 
75   public static final int AUDIO_ENCODING_FIELD_NUMBER = 1;
76   private int audioEncoding_ = 0;
77   /**
78    *
79    *
80    * <pre>
81    * Required. Audio encoding of the audio content to process.
82    * </pre>
83    *
84    * <code>.google.cloud.dialogflow.v2beta1.AudioEncoding audio_encoding = 1;</code>
85    *
86    * @return The enum numeric value on the wire for audioEncoding.
87    */
88   @java.lang.Override
89   public int getAudioEncodingValue() {
90     return audioEncoding_;
91   }
92   /**
93    *
94    *
95    * <pre>
96    * Required. Audio encoding of the audio content to process.
97    * </pre>
98    *
99    * <code>.google.cloud.dialogflow.v2beta1.AudioEncoding audio_encoding = 1;</code>
100    *
101    * @return The audioEncoding.
102    */
103   @java.lang.Override
104   public com.google.cloud.dialogflow.v2beta1.AudioEncoding getAudioEncoding() {
105     com.google.cloud.dialogflow.v2beta1.AudioEncoding result =
106         com.google.cloud.dialogflow.v2beta1.AudioEncoding.forNumber(audioEncoding_);
107     return result == null ? com.google.cloud.dialogflow.v2beta1.AudioEncoding.UNRECOGNIZED : result;
108   }
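// Illustrative usage sketch (not part of the generated source): a minimal way to assemble
// this message through its builder. The encoding, sample rate, and language code values are
// assumptions chosen only for the example; the setter names are the ones protoc generates
// for the fields documented in this file.
//
//   InputAudioConfig config =
//       InputAudioConfig.newBuilder()
//           .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
//           .setSampleRateHertz(16000)
//           .setLanguageCode("en-US")
//           .build();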
109 
110   public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2;
111   private int sampleRateHertz_ = 0;
112   /**
113    *
114    *
115    * <pre>
116    * Required. Sample rate (in Hertz) of the audio content sent in the query.
117    * Refer to
118    * [Cloud Speech API
119    * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
120    * more details.
121    * </pre>
122    *
123    * <code>int32 sample_rate_hertz = 2;</code>
124    *
125    * @return The sampleRateHertz.
126    */
127   @java.lang.Override
128   public int getSampleRateHertz() {
129     return sampleRateHertz_;
130   }
131 
132   public static final int LANGUAGE_CODE_FIELD_NUMBER = 3;
133 
134   @SuppressWarnings("serial")
135   private volatile java.lang.Object languageCode_ = "";
136   /**
137    *
138    *
139    * <pre>
140    * Required. The language of the supplied audio. Dialogflow does not do
141    * translations. See [Language
142    * Support](https://cloud.google.com/dialogflow/docs/reference/language)
143    * for a list of the currently supported language codes. Note that queries in
144    * the same session do not necessarily need to specify the same language.
145    * </pre>
146    *
147    * <code>string language_code = 3;</code>
148    *
149    * @return The languageCode.
150    */
151   @java.lang.Override
152   public java.lang.String getLanguageCode() {
153     java.lang.Object ref = languageCode_;
154     if (ref instanceof java.lang.String) {
155       return (java.lang.String) ref;
156     } else {
157       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
158       java.lang.String s = bs.toStringUtf8();
159       languageCode_ = s;
160       return s;
161     }
162   }
163   /**
164    *
165    *
166    * <pre>
167    * Required. The language of the supplied audio. Dialogflow does not do
168    * translations. See [Language
169    * Support](https://cloud.google.com/dialogflow/docs/reference/language)
170    * for a list of the currently supported language codes. Note that queries in
171    * the same session do not necessarily need to specify the same language.
172    * </pre>
173    *
174    * <code>string language_code = 3;</code>
175    *
176    * @return The bytes for languageCode.
177    */
178   @java.lang.Override
179   public com.google.protobuf.ByteString getLanguageCodeBytes() {
180     java.lang.Object ref = languageCode_;
181     if (ref instanceof java.lang.String) {
182       com.google.protobuf.ByteString b =
183           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
184       languageCode_ = b;
185       return b;
186     } else {
187       return (com.google.protobuf.ByteString) ref;
188     }
189   }
190 
191   public static final int ENABLE_WORD_INFO_FIELD_NUMBER = 13;
192   private boolean enableWordInfo_ = false;
193   /**
194    *
195    *
196    * <pre>
197    * If `true`, Dialogflow returns
198    * [SpeechWordInfo][google.cloud.dialogflow.v2beta1.SpeechWordInfo] in
199    * [StreamingRecognitionResult][google.cloud.dialogflow.v2beta1.StreamingRecognitionResult]
200    * with information about the recognized speech words, e.g. start and end time
201    * offsets. If false or unspecified, Speech doesn't return any word-level
202    * information.
203    * </pre>
204    *
205    * <code>bool enable_word_info = 13;</code>
206    *
207    * @return The enableWordInfo.
208    */
209   @java.lang.Override
210   public boolean getEnableWordInfo() {
211     return enableWordInfo_;
212   }
213 
214   public static final int PHRASE_HINTS_FIELD_NUMBER = 4;
215 
216   @SuppressWarnings("serial")
217   private com.google.protobuf.LazyStringList phraseHints_;
218   /**
219    *
220    *
221    * <pre>
222    * A list of strings containing words and phrases that the speech
223    * recognizer should recognize with higher likelihood.
224    * See [the Cloud Speech
225    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
226    * for more details.
227    * This field is deprecated. Please use [speech_contexts]() instead. If you
228    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
229    * treat the [phrase_hints]() as a single additional [SpeechContext]().
230    * </pre>
231    *
232    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
233    *
234    * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
235    *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
236    * @return A list containing the phraseHints.
237    */
238   @java.lang.Deprecated
239   public com.google.protobuf.ProtocolStringList getPhraseHintsList() {
240     return phraseHints_;
241   }
242   /**
243    *
244    *
245    * <pre>
246    * A list of strings containing words and phrases that the speech
247    * recognizer should recognize with higher likelihood.
248    * See [the Cloud Speech
249    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
250    * for more details.
251    * This field is deprecated. Please use [speech_contexts]() instead. If you
252    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
253    * treat the [phrase_hints]() as a single additional [SpeechContext]().
254    * </pre>
255    *
256    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
257    *
258    * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
259    *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
260    * @return The count of phraseHints.
261    */
262   @java.lang.Deprecated
263   public int getPhraseHintsCount() {
264     return phraseHints_.size();
265   }
266   /**
267    *
268    *
269    * <pre>
270    * A list of strings containing words and phrases that the speech
271    * recognizer should recognize with higher likelihood.
272    * See [the Cloud Speech
273    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
274    * for more details.
275    * This field is deprecated. Please use [speech_contexts]() instead. If you
276    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
277    * treat the [phrase_hints]() as a single additional [SpeechContext]().
278    * </pre>
279    *
280    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
281    *
282    * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
283    *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
284    * @param index The index of the element to return.
285    * @return The phraseHints at the given index.
286    */
287   @java.lang.Deprecated
288   public java.lang.String getPhraseHints(int index) {
289     return phraseHints_.get(index);
290   }
291   /**
292    *
293    *
294    * <pre>
295    * A list of strings containing words and phrases that the speech
296    * recognizer should recognize with higher likelihood.
297    * See [the Cloud Speech
298    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
299    * for more details.
300    * This field is deprecated. Please use [speech_contexts]() instead. If you
301    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
302    * treat the [phrase_hints]() as a single additional [SpeechContext]().
303    * </pre>
304    *
305    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
306    *
307    * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
308    *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
309    * @param index The index of the value to return.
310    * @return The bytes of the phraseHints at the given index.
311    */
312   @java.lang.Deprecated
313   public com.google.protobuf.ByteString getPhraseHintsBytes(int index) {
314     return phraseHints_.getByteString(index);
315   }
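// Illustrative sketch (not part of the generated source): since phrase_hints is deprecated,
// the same hints can be supplied as a SpeechContext instead. The builder methods shown are
// the ones protoc generates for the speech_contexts field, and the phrase and boost values
// are assumptions chosen only for the example.
//
//   SpeechContext hints =
//       SpeechContext.newBuilder().addPhrases("order a pizza").setBoost(10.0f).build();
//   InputAudioConfig config =
//       InputAudioConfig.newBuilder().addSpeechContexts(hints).build();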
316 
317   public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 11;
318 
319   @SuppressWarnings("serial")
320   private java.util.List<com.google.cloud.dialogflow.v2beta1.SpeechContext> speechContexts_;
321   /**
322    *
323    *
324    * <pre>
325    * Context information to assist speech recognition.
326    * See [the Cloud Speech
327    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
328    * for more details.
329    * </pre>
330    *
331    * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
332    */
333   @java.lang.Override
334   public java.util.List<com.google.cloud.dialogflow.v2beta1.SpeechContext> getSpeechContextsList() {
335     return speechContexts_;
336   }
337   /**
338    *
339    *
340    * <pre>
341    * Context information to assist speech recognition.
342    * See [the Cloud Speech
343    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
344    * for more details.
345    * </pre>
346    *
347    * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
348    */
349   @java.lang.Override
350   public java.util.List<? extends com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>
351       getSpeechContextsOrBuilderList() {
352     return speechContexts_;
353   }
354   /**
355    *
356    *
357    * <pre>
358    * Context information to assist speech recognition.
359    * See [the Cloud Speech
360    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
361    * for more details.
362    * </pre>
363    *
364    * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
365    */
366   @java.lang.Override
367   public int getSpeechContextsCount() {
368     return speechContexts_.size();
369   }
370   /**
371    *
372    *
373    * <pre>
374    * Context information to assist speech recognition.
375    * See [the Cloud Speech
376    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
377    * for more details.
378    * </pre>
379    *
380    * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
381    */
382   @java.lang.Override
383   public com.google.cloud.dialogflow.v2beta1.SpeechContext getSpeechContexts(int index) {
384     return speechContexts_.get(index);
385   }
386   /**
387    *
388    *
389    * <pre>
390    * Context information to assist speech recognition.
391    * See [the Cloud Speech
392    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
393    * for more details.
394    * </pre>
395    *
396    * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
397    */
398   @java.lang.Override
399   public com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
400       int index) {
401     return speechContexts_.get(index);
402   }
403 
404   public static final int MODEL_FIELD_NUMBER = 7;
405 
406   @SuppressWarnings("serial")
407   private volatile java.lang.Object model_ = "";
408   /**
409    *
410    *
411    * <pre>
412    * Which Speech model to select for the given request. Select the
413    * model best suited to your domain to get best results. If a model is not
414    * explicitly specified, then we auto-select a model based on the parameters
415    * in the InputAudioConfig.
416    * If enhanced speech model is enabled for the agent and an enhanced
417    * version of the specified model for the language does not exist, then the
418    * speech is recognized using the standard version of the specified model.
419    * Refer to
420    * [Cloud Speech API
421    * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
422    * for more details.
423    * If you specify a model, the following models typically have the best
424    * performance:
425    * - phone_call (best for Agent Assist and telephony)
426    * - latest_short (best for Dialogflow non-telephony)
427    * - command_and_search (best for very short utterances and commands)
428    * </pre>
429    *
430    * <code>string model = 7;</code>
431    *
432    * @return The model.
433    */
434   @java.lang.Override
435   public java.lang.String getModel() {
436     java.lang.Object ref = model_;
437     if (ref instanceof java.lang.String) {
438       return (java.lang.String) ref;
439     } else {
440       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
441       java.lang.String s = bs.toStringUtf8();
442       model_ = s;
443       return s;
444     }
445   }
446   /**
447    *
448    *
449    * <pre>
450    * Which Speech model to select for the given request. Select the
451    * model best suited to your domain to get best results. If a model is not
452    * explicitly specified, then we auto-select a model based on the parameters
453    * in the InputAudioConfig.
454    * If enhanced speech model is enabled for the agent and an enhanced
455    * version of the specified model for the language does not exist, then the
456    * speech is recognized using the standard version of the specified model.
457    * Refer to
458    * [Cloud Speech API
459    * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
460    * for more details.
461    * If you specify a model, the following models typically have the best
462    * performance:
463    * - phone_call (best for Agent Assist and telephony)
464    * - latest_short (best for Dialogflow non-telephony)
465    * - command_and_search (best for very short utterances and commands)
466    * </pre>
467    *
468    * <code>string model = 7;</code>
469    *
470    * @return The bytes for model.
471    */
472   @java.lang.Override
473   public com.google.protobuf.ByteString getModelBytes() {
474     java.lang.Object ref = model_;
475     if (ref instanceof java.lang.String) {
476       com.google.protobuf.ByteString b =
477           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
478       model_ = b;
479       return b;
480     } else {
481       return (com.google.protobuf.ByteString) ref;
482     }
483   }
484 
485   public static final int MODEL_VARIANT_FIELD_NUMBER = 10;
486   private int modelVariant_ = 0;
487   /**
488    *
489    *
490    * <pre>
491    * Which variant of the [Speech
492    * model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
493    * </pre>
494    *
495    * <code>.google.cloud.dialogflow.v2beta1.SpeechModelVariant model_variant = 10;</code>
496    *
497    * @return The enum numeric value on the wire for modelVariant.
498    */
499   @java.lang.Override
500   public int getModelVariantValue() {
501     return modelVariant_;
502   }
503   /**
504    *
505    *
506    * <pre>
507    * Which variant of the [Speech
508    * model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
509    * </pre>
510    *
511    * <code>.google.cloud.dialogflow.v2beta1.SpeechModelVariant model_variant = 10;</code>
512    *
513    * @return The modelVariant.
514    */
515   @java.lang.Override
516   public com.google.cloud.dialogflow.v2beta1.SpeechModelVariant getModelVariant() {
517     com.google.cloud.dialogflow.v2beta1.SpeechModelVariant result =
518         com.google.cloud.dialogflow.v2beta1.SpeechModelVariant.forNumber(modelVariant_);
519     return result == null
520         ? com.google.cloud.dialogflow.v2beta1.SpeechModelVariant.UNRECOGNIZED
521         : result;
522   }
523 
524   public static final int SINGLE_UTTERANCE_FIELD_NUMBER = 8;
525   private boolean singleUtterance_ = false;
526   /**
527    *
528    *
529    * <pre>
530    * If `false` (default), recognition does not cease until the
531    * client closes the stream.
532    * If `true`, the recognizer will detect a single spoken utterance in input
533    * audio. Recognition ceases when it detects the audio's voice has
534    * stopped or paused. In this case, once a detected intent is received, the
535    * client should close the stream and start a new request with a new stream as
536    * needed.
537    * Note: This setting is relevant only for streaming methods.
538    * Note: When specified, InputAudioConfig.single_utterance takes precedence
539    * over StreamingDetectIntentRequest.single_utterance.
540    * </pre>
541    *
542    * <code>bool single_utterance = 8;</code>
543    *
544    * @return The singleUtterance.
545    */
546   @java.lang.Override
547   public boolean getSingleUtterance() {
548     return singleUtterance_;
549   }
550 
551   public static final int DISABLE_NO_SPEECH_RECOGNIZED_EVENT_FIELD_NUMBER = 14;
552   private boolean disableNoSpeechRecognizedEvent_ = false;
553   /**
554    *
555    *
556    * <pre>
557    * Only used in
558    * [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]
559    * and
560    * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent].
561    * If `false` and recognition doesn't return any result, trigger
562    * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
563    * </pre>
564    *
565    * <code>bool disable_no_speech_recognized_event = 14;</code>
566    *
567    * @return The disableNoSpeechRecognizedEvent.
568    */
569   @java.lang.Override
570   public boolean getDisableNoSpeechRecognizedEvent() {
571     return disableNoSpeechRecognizedEvent_;
572   }
573 
574   public static final int BARGE_IN_CONFIG_FIELD_NUMBER = 15;
575   private com.google.cloud.dialogflow.v2beta1.BargeInConfig bargeInConfig_;
576   /**
577    *
578    *
579    * <pre>
580    * Configuration of barge-in behavior during the streaming of input audio.
581    * </pre>
582    *
583    * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
584    *
585    * @return Whether the bargeInConfig field is set.
586    */
587   @java.lang.Override
588   public boolean hasBargeInConfig() {
589     return bargeInConfig_ != null;
590   }
591   /**
592    *
593    *
594    * <pre>
595    * Configuration of barge-in behavior during the streaming of input audio.
596    * </pre>
597    *
598    * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
599    *
600    * @return The bargeInConfig.
601    */
602   @java.lang.Override
603   public com.google.cloud.dialogflow.v2beta1.BargeInConfig getBargeInConfig() {
604     return bargeInConfig_ == null
605         ? com.google.cloud.dialogflow.v2beta1.BargeInConfig.getDefaultInstance()
606         : bargeInConfig_;
607   }
608   /**
609    *
610    *
611    * <pre>
612    * Configuration of barge-in behavior during the streaming of input audio.
613    * </pre>
614    *
615    * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
616    */
617   @java.lang.Override
618   public com.google.cloud.dialogflow.v2beta1.BargeInConfigOrBuilder getBargeInConfigOrBuilder() {
619     return bargeInConfig_ == null
620         ? com.google.cloud.dialogflow.v2beta1.BargeInConfig.getDefaultInstance()
621         : bargeInConfig_;
622   }
623 
624   public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 17;
625   private boolean enableAutomaticPunctuation_ = false;
626   /**
627    *
628    *
629    * <pre>
630    * Enable automatic punctuation option at the speech backend.
631    * </pre>
632    *
633    * <code>bool enable_automatic_punctuation = 17;</code>
634    *
635    * @return The enableAutomaticPunctuation.
636    */
637   @java.lang.Override
638   public boolean getEnableAutomaticPunctuation() {
639     return enableAutomaticPunctuation_;
640   }
641 
642   private byte memoizedIsInitialized = -1;
643 
644   @java.lang.Override
645   public final boolean isInitialized() {
646     byte isInitialized = memoizedIsInitialized;
647     if (isInitialized == 1) return true;
648     if (isInitialized == 0) return false;
649 
650     memoizedIsInitialized = 1;
651     return true;
652   }
653 
654   @java.lang.Override
655   public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
656     if (audioEncoding_
657         != com.google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED
658             .getNumber()) {
659       output.writeEnum(1, audioEncoding_);
660     }
661     if (sampleRateHertz_ != 0) {
662       output.writeInt32(2, sampleRateHertz_);
663     }
664     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
665       com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_);
666     }
667     for (int i = 0; i < phraseHints_.size(); i++) {
668       com.google.protobuf.GeneratedMessageV3.writeString(output, 4, phraseHints_.getRaw(i));
669     }
670     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
671       com.google.protobuf.GeneratedMessageV3.writeString(output, 7, model_);
672     }
673     if (singleUtterance_ != false) {
674       output.writeBool(8, singleUtterance_);
675     }
676     if (modelVariant_
677         != com.google.cloud.dialogflow.v2beta1.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED
678             .getNumber()) {
679       output.writeEnum(10, modelVariant_);
680     }
681     for (int i = 0; i < speechContexts_.size(); i++) {
682       output.writeMessage(11, speechContexts_.get(i));
683     }
684     if (enableWordInfo_ != false) {
685       output.writeBool(13, enableWordInfo_);
686     }
687     if (disableNoSpeechRecognizedEvent_ != false) {
688       output.writeBool(14, disableNoSpeechRecognizedEvent_);
689     }
690     if (bargeInConfig_ != null) {
691       output.writeMessage(15, getBargeInConfig());
692     }
693     if (enableAutomaticPunctuation_ != false) {
694       output.writeBool(17, enableAutomaticPunctuation_);
695     }
696     getUnknownFields().writeTo(output);
697   }
698 
699   @java.lang.Override
700   public int getSerializedSize() {
701     int size = memoizedSize;
702     if (size != -1) return size;
703 
704     size = 0;
705     if (audioEncoding_
706         != com.google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED
707             .getNumber()) {
708       size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, audioEncoding_);
709     }
710     if (sampleRateHertz_ != 0) {
711       size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_);
712     }
713     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
714       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_);
715     }
716     {
717       int dataSize = 0;
718       for (int i = 0; i < phraseHints_.size(); i++) {
719         dataSize += computeStringSizeNoTag(phraseHints_.getRaw(i));
720       }
721       size += dataSize;
722       size += 1 * getPhraseHintsList().size();
723     }
724     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
725       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, model_);
726     }
727     if (singleUtterance_ != false) {
728       size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, singleUtterance_);
729     }
730     if (modelVariant_
731         != com.google.cloud.dialogflow.v2beta1.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED
732             .getNumber()) {
733       size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, modelVariant_);
734     }
735     for (int i = 0; i < speechContexts_.size(); i++) {
736       size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, speechContexts_.get(i));
737     }
738     if (enableWordInfo_ != false) {
739       size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, enableWordInfo_);
740     }
741     if (disableNoSpeechRecognizedEvent_ != false) {
742       size +=
743           com.google.protobuf.CodedOutputStream.computeBoolSize(
744               14, disableNoSpeechRecognizedEvent_);
745     }
746     if (bargeInConfig_ != null) {
747       size += com.google.protobuf.CodedOutputStream.computeMessageSize(15, getBargeInConfig());
748     }
749     if (enableAutomaticPunctuation_ != false) {
750       size +=
751           com.google.protobuf.CodedOutputStream.computeBoolSize(17, enableAutomaticPunctuation_);
752     }
753     size += getUnknownFields().getSerializedSize();
754     memoizedSize = size;
755     return size;
756   }
757 
758   @java.lang.Override
759   public boolean equals(final java.lang.Object obj) {
760     if (obj == this) {
761       return true;
762     }
763     if (!(obj instanceof com.google.cloud.dialogflow.v2beta1.InputAudioConfig)) {
764       return super.equals(obj);
765     }
766     com.google.cloud.dialogflow.v2beta1.InputAudioConfig other =
767         (com.google.cloud.dialogflow.v2beta1.InputAudioConfig) obj;
768 
769     if (audioEncoding_ != other.audioEncoding_) return false;
770     if (getSampleRateHertz() != other.getSampleRateHertz()) return false;
771     if (!getLanguageCode().equals(other.getLanguageCode())) return false;
772     if (getEnableWordInfo() != other.getEnableWordInfo()) return false;
773     if (!getPhraseHintsList().equals(other.getPhraseHintsList())) return false;
774     if (!getSpeechContextsList().equals(other.getSpeechContextsList())) return false;
775     if (!getModel().equals(other.getModel())) return false;
776     if (modelVariant_ != other.modelVariant_) return false;
777     if (getSingleUtterance() != other.getSingleUtterance()) return false;
778     if (getDisableNoSpeechRecognizedEvent() != other.getDisableNoSpeechRecognizedEvent())
779       return false;
780     if (hasBargeInConfig() != other.hasBargeInConfig()) return false;
781     if (hasBargeInConfig()) {
782       if (!getBargeInConfig().equals(other.getBargeInConfig())) return false;
783     }
784     if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
785     if (!getUnknownFields().equals(other.getUnknownFields())) return false;
786     return true;
787   }
788 
789   @java.lang.Override
790   public int hashCode() {
791     if (memoizedHashCode != 0) {
792       return memoizedHashCode;
793     }
794     int hash = 41;
795     hash = (19 * hash) + getDescriptor().hashCode();
796     hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER;
797     hash = (53 * hash) + audioEncoding_;
798     hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
799     hash = (53 * hash) + getSampleRateHertz();
800     hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
801     hash = (53 * hash) + getLanguageCode().hashCode();
802     hash = (37 * hash) + ENABLE_WORD_INFO_FIELD_NUMBER;
803     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordInfo());
804     if (getPhraseHintsCount() > 0) {
805       hash = (37 * hash) + PHRASE_HINTS_FIELD_NUMBER;
806       hash = (53 * hash) + getPhraseHintsList().hashCode();
807     }
808     if (getSpeechContextsCount() > 0) {
809       hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
810       hash = (53 * hash) + getSpeechContextsList().hashCode();
811     }
812     hash = (37 * hash) + MODEL_FIELD_NUMBER;
813     hash = (53 * hash) + getModel().hashCode();
814     hash = (37 * hash) + MODEL_VARIANT_FIELD_NUMBER;
815     hash = (53 * hash) + modelVariant_;
816     hash = (37 * hash) + SINGLE_UTTERANCE_FIELD_NUMBER;
817     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSingleUtterance());
818     hash = (37 * hash) + DISABLE_NO_SPEECH_RECOGNIZED_EVENT_FIELD_NUMBER;
819     hash =
820         (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDisableNoSpeechRecognizedEvent());
821     if (hasBargeInConfig()) {
822       hash = (37 * hash) + BARGE_IN_CONFIG_FIELD_NUMBER;
823       hash = (53 * hash) + getBargeInConfig().hashCode();
824     }
825     hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
826     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
827     hash = (29 * hash) + getUnknownFields().hashCode();
828     memoizedHashCode = hash;
829     return hash;
830   }
831 
832   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
833       java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
834     return PARSER.parseFrom(data);
835   }
836 
837   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
838       java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
839       throws com.google.protobuf.InvalidProtocolBufferException {
840     return PARSER.parseFrom(data, extensionRegistry);
841   }
842 
843   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
844       com.google.protobuf.ByteString data)
845       throws com.google.protobuf.InvalidProtocolBufferException {
846     return PARSER.parseFrom(data);
847   }
848 
849   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
850       com.google.protobuf.ByteString data,
851       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
852       throws com.google.protobuf.InvalidProtocolBufferException {
853     return PARSER.parseFrom(data, extensionRegistry);
854   }
855 
856   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(byte[] data)
857       throws com.google.protobuf.InvalidProtocolBufferException {
858     return PARSER.parseFrom(data);
859   }
860 
861   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
862       byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
863       throws com.google.protobuf.InvalidProtocolBufferException {
864     return PARSER.parseFrom(data, extensionRegistry);
865   }
866 
867   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
868       java.io.InputStream input) throws java.io.IOException {
869     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
870   }
871 
872   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
873       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
874       throws java.io.IOException {
875     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
876         PARSER, input, extensionRegistry);
877   }
878 
879   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseDelimitedFrom(
880       java.io.InputStream input) throws java.io.IOException {
881     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
882   }
883 
884   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseDelimitedFrom(
885       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
886       throws java.io.IOException {
887     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
888         PARSER, input, extensionRegistry);
889   }
890 
891   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
892       com.google.protobuf.CodedInputStream input) throws java.io.IOException {
893     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
894   }
895 
896   public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig parseFrom(
897       com.google.protobuf.CodedInputStream input,
898       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
899       throws java.io.IOException {
900     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
901         PARSER, input, extensionRegistry);
902   }
903 
904   @java.lang.Override
905   public Builder newBuilderForType() {
906     return newBuilder();
907   }
908 
909   public static Builder newBuilder() {
910     return DEFAULT_INSTANCE.toBuilder();
911   }
912 
913   public static Builder newBuilder(com.google.cloud.dialogflow.v2beta1.InputAudioConfig prototype) {
914     return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
915   }
916 
917   @java.lang.Override
918   public Builder toBuilder() {
919     return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
920   }
921 
922   @java.lang.Override
923   protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
924     Builder builder = new Builder(parent);
925     return builder;
926   }
927   /**
928    *
929    *
930    * <pre>
931    * Instructs the speech recognizer on how to process the audio content.
932    * </pre>
933    *
934    * Protobuf type {@code google.cloud.dialogflow.v2beta1.InputAudioConfig}
935    */
936   public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
937       implements
938       // @@protoc_insertion_point(builder_implements:google.cloud.dialogflow.v2beta1.InputAudioConfig)
939       com.google.cloud.dialogflow.v2beta1.InputAudioConfigOrBuilder {
940     public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
941       return com.google.cloud.dialogflow.v2beta1.AudioConfigProto
942           .internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_descriptor;
943     }
944 
945     @java.lang.Override
946     protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
947         internalGetFieldAccessorTable() {
948       return com.google.cloud.dialogflow.v2beta1.AudioConfigProto
949           .internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_fieldAccessorTable
950           .ensureFieldAccessorsInitialized(
951               com.google.cloud.dialogflow.v2beta1.InputAudioConfig.class,
952               com.google.cloud.dialogflow.v2beta1.InputAudioConfig.Builder.class);
953     }
954 
955     // Construct using com.google.cloud.dialogflow.v2beta1.InputAudioConfig.newBuilder()
956     private Builder() {}
957 
958     private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
959       super(parent);
960     }
961 
962     @java.lang.Override
963     public Builder clear() {
964       super.clear();
965       bitField0_ = 0;
966       audioEncoding_ = 0;
967       sampleRateHertz_ = 0;
968       languageCode_ = "";
969       enableWordInfo_ = false;
970       phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
971       bitField0_ = (bitField0_ & ~0x00000010);
972       if (speechContextsBuilder_ == null) {
973         speechContexts_ = java.util.Collections.emptyList();
974       } else {
975         speechContexts_ = null;
976         speechContextsBuilder_.clear();
977       }
978       bitField0_ = (bitField0_ & ~0x00000020);
979       model_ = "";
980       modelVariant_ = 0;
981       singleUtterance_ = false;
982       disableNoSpeechRecognizedEvent_ = false;
983       bargeInConfig_ = null;
984       if (bargeInConfigBuilder_ != null) {
985         bargeInConfigBuilder_.dispose();
986         bargeInConfigBuilder_ = null;
987       }
988       enableAutomaticPunctuation_ = false;
989       return this;
990     }
991 
992     @java.lang.Override
993     public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
994       return com.google.cloud.dialogflow.v2beta1.AudioConfigProto
995           .internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_descriptor;
996     }
997 
998     @java.lang.Override
999     public com.google.cloud.dialogflow.v2beta1.InputAudioConfig getDefaultInstanceForType() {
1000       return com.google.cloud.dialogflow.v2beta1.InputAudioConfig.getDefaultInstance();
1001     }
1002 
1003     @java.lang.Override
1004     public com.google.cloud.dialogflow.v2beta1.InputAudioConfig build() {
1005       com.google.cloud.dialogflow.v2beta1.InputAudioConfig result = buildPartial();
1006       if (!result.isInitialized()) {
1007         throw newUninitializedMessageException(result);
1008       }
1009       return result;
1010     }
1011 
1012     @java.lang.Override
1013     public com.google.cloud.dialogflow.v2beta1.InputAudioConfig buildPartial() {
1014       com.google.cloud.dialogflow.v2beta1.InputAudioConfig result =
1015           new com.google.cloud.dialogflow.v2beta1.InputAudioConfig(this);
1016       buildPartialRepeatedFields(result);
1017       if (bitField0_ != 0) {
1018         buildPartial0(result);
1019       }
1020       onBuilt();
1021       return result;
1022     }
1023 
1024     private void buildPartialRepeatedFields(
1025         com.google.cloud.dialogflow.v2beta1.InputAudioConfig result) {
1026       if (((bitField0_ & 0x00000010) != 0)) {
1027         phraseHints_ = phraseHints_.getUnmodifiableView();
1028         bitField0_ = (bitField0_ & ~0x00000010);
1029       }
1030       result.phraseHints_ = phraseHints_;
1031       if (speechContextsBuilder_ == null) {
1032         if (((bitField0_ & 0x00000020) != 0)) {
1033           speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
1034           bitField0_ = (bitField0_ & ~0x00000020);
1035         }
1036         result.speechContexts_ = speechContexts_;
1037       } else {
1038         result.speechContexts_ = speechContextsBuilder_.build();
1039       }
1040     }
1041 
1042     private void buildPartial0(com.google.cloud.dialogflow.v2beta1.InputAudioConfig result) {
1043       int from_bitField0_ = bitField0_;
1044       if (((from_bitField0_ & 0x00000001) != 0)) {
1045         result.audioEncoding_ = audioEncoding_;
1046       }
1047       if (((from_bitField0_ & 0x00000002) != 0)) {
1048         result.sampleRateHertz_ = sampleRateHertz_;
1049       }
1050       if (((from_bitField0_ & 0x00000004) != 0)) {
1051         result.languageCode_ = languageCode_;
1052       }
1053       if (((from_bitField0_ & 0x00000008) != 0)) {
1054         result.enableWordInfo_ = enableWordInfo_;
1055       }
1056       if (((from_bitField0_ & 0x00000040) != 0)) {
1057         result.model_ = model_;
1058       }
1059       if (((from_bitField0_ & 0x00000080) != 0)) {
1060         result.modelVariant_ = modelVariant_;
1061       }
1062       if (((from_bitField0_ & 0x00000100) != 0)) {
1063         result.singleUtterance_ = singleUtterance_;
1064       }
1065       if (((from_bitField0_ & 0x00000200) != 0)) {
1066         result.disableNoSpeechRecognizedEvent_ = disableNoSpeechRecognizedEvent_;
1067       }
1068       if (((from_bitField0_ & 0x00000400) != 0)) {
1069         result.bargeInConfig_ =
1070             bargeInConfigBuilder_ == null ? bargeInConfig_ : bargeInConfigBuilder_.build();
1071       }
1072       if (((from_bitField0_ & 0x00000800) != 0)) {
1073         result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
1074       }
1075     }
1076 
1077     @java.lang.Override
1078     public Builder clone() {
1079       return super.clone();
1080     }
1081 
1082     @java.lang.Override
1083     public Builder setField(
1084         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1085       return super.setField(field, value);
1086     }
1087 
1088     @java.lang.Override
1089     public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
1090       return super.clearField(field);
1091     }
1092 
1093     @java.lang.Override
1094     public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
1095       return super.clearOneof(oneof);
1096     }
1097 
1098     @java.lang.Override
1099     public Builder setRepeatedField(
1100         com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
1101       return super.setRepeatedField(field, index, value);
1102     }
1103 
1104     @java.lang.Override
1105     public Builder addRepeatedField(
1106         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1107       return super.addRepeatedField(field, value);
1108     }
1109 
1110     @java.lang.Override
1111     public Builder mergeFrom(com.google.protobuf.Message other) {
1112       if (other instanceof com.google.cloud.dialogflow.v2beta1.InputAudioConfig) {
1113         return mergeFrom((com.google.cloud.dialogflow.v2beta1.InputAudioConfig) other);
1114       } else {
1115         super.mergeFrom(other);
1116         return this;
1117       }
1118     }
1119 
1120     public Builder mergeFrom(com.google.cloud.dialogflow.v2beta1.InputAudioConfig other) {
1121       if (other == com.google.cloud.dialogflow.v2beta1.InputAudioConfig.getDefaultInstance())
1122         return this;
1123       if (other.audioEncoding_ != 0) {
1124         setAudioEncodingValue(other.getAudioEncodingValue());
1125       }
1126       if (other.getSampleRateHertz() != 0) {
1127         setSampleRateHertz(other.getSampleRateHertz());
1128       }
1129       if (!other.getLanguageCode().isEmpty()) {
1130         languageCode_ = other.languageCode_;
1131         bitField0_ |= 0x00000004;
1132         onChanged();
1133       }
1134       if (other.getEnableWordInfo() != false) {
1135         setEnableWordInfo(other.getEnableWordInfo());
1136       }
1137       if (!other.phraseHints_.isEmpty()) {
1138         if (phraseHints_.isEmpty()) {
1139           phraseHints_ = other.phraseHints_;
1140           bitField0_ = (bitField0_ & ~0x00000010);
1141         } else {
1142           ensurePhraseHintsIsMutable();
1143           phraseHints_.addAll(other.phraseHints_);
1144         }
1145         onChanged();
1146       }
1147       if (speechContextsBuilder_ == null) {
1148         if (!other.speechContexts_.isEmpty()) {
1149           if (speechContexts_.isEmpty()) {
1150             speechContexts_ = other.speechContexts_;
1151             bitField0_ = (bitField0_ & ~0x00000020);
1152           } else {
1153             ensureSpeechContextsIsMutable();
1154             speechContexts_.addAll(other.speechContexts_);
1155           }
1156           onChanged();
1157         }
1158       } else {
1159         if (!other.speechContexts_.isEmpty()) {
1160           if (speechContextsBuilder_.isEmpty()) {
1161             speechContextsBuilder_.dispose();
1162             speechContextsBuilder_ = null;
1163             speechContexts_ = other.speechContexts_;
1164             bitField0_ = (bitField0_ & ~0x00000020);
1165             speechContextsBuilder_ =
1166                 com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
1167                     ? getSpeechContextsFieldBuilder()
1168                     : null;
1169           } else {
1170             speechContextsBuilder_.addAllMessages(other.speechContexts_);
1171           }
1172         }
1173       }
1174       if (!other.getModel().isEmpty()) {
1175         model_ = other.model_;
1176         bitField0_ |= 0x00000040;
1177         onChanged();
1178       }
1179       if (other.modelVariant_ != 0) {
1180         setModelVariantValue(other.getModelVariantValue());
1181       }
1182       if (other.getSingleUtterance() != false) {
1183         setSingleUtterance(other.getSingleUtterance());
1184       }
1185       if (other.getDisableNoSpeechRecognizedEvent() != false) {
1186         setDisableNoSpeechRecognizedEvent(other.getDisableNoSpeechRecognizedEvent());
1187       }
1188       if (other.hasBargeInConfig()) {
1189         mergeBargeInConfig(other.getBargeInConfig());
1190       }
1191       if (other.getEnableAutomaticPunctuation() != false) {
1192         setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation());
1193       }
1194       this.mergeUnknownFields(other.getUnknownFields());
1195       onChanged();
1196       return this;
1197     }
1198 
1199     @java.lang.Override
1200     public final boolean isInitialized() {
1201       return true;
1202     }
1203 
1204     @java.lang.Override
1205     public Builder mergeFrom(
1206         com.google.protobuf.CodedInputStream input,
1207         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1208         throws java.io.IOException {
1209       if (extensionRegistry == null) {
1210         throw new java.lang.NullPointerException();
1211       }
1212       try {
1213         boolean done = false;
1214         while (!done) {
1215           int tag = input.readTag();
1216           switch (tag) {
1217             case 0:
1218               done = true;
1219               break;
1220             case 8:
1221               {
1222                 audioEncoding_ = input.readEnum();
1223                 bitField0_ |= 0x00000001;
1224                 break;
1225               } // case 8
1226             case 16:
1227               {
1228                 sampleRateHertz_ = input.readInt32();
1229                 bitField0_ |= 0x00000002;
1230                 break;
1231               } // case 16
1232             case 26:
1233               {
1234                 languageCode_ = input.readStringRequireUtf8();
1235                 bitField0_ |= 0x00000004;
1236                 break;
1237               } // case 26
1238             case 34:
1239               {
1240                 java.lang.String s = input.readStringRequireUtf8();
1241                 ensurePhraseHintsIsMutable();
1242                 phraseHints_.add(s);
1243                 break;
1244               } // case 34
1245             case 58:
1246               {
1247                 model_ = input.readStringRequireUtf8();
1248                 bitField0_ |= 0x00000040;
1249                 break;
1250               } // case 58
1251             case 64:
1252               {
1253                 singleUtterance_ = input.readBool();
1254                 bitField0_ |= 0x00000100;
1255                 break;
1256               } // case 64
1257             case 80:
1258               {
1259                 modelVariant_ = input.readEnum();
1260                 bitField0_ |= 0x00000080;
1261                 break;
1262               } // case 80
1263             case 90:
1264               {
1265                 com.google.cloud.dialogflow.v2beta1.SpeechContext m =
1266                     input.readMessage(
1267                         com.google.cloud.dialogflow.v2beta1.SpeechContext.parser(),
1268                         extensionRegistry);
1269                 if (speechContextsBuilder_ == null) {
1270                   ensureSpeechContextsIsMutable();
1271                   speechContexts_.add(m);
1272                 } else {
1273                   speechContextsBuilder_.addMessage(m);
1274                 }
1275                 break;
1276               } // case 90
1277             case 104:
1278               {
1279                 enableWordInfo_ = input.readBool();
1280                 bitField0_ |= 0x00000008;
1281                 break;
1282               } // case 104
1283             case 112:
1284               {
1285                 disableNoSpeechRecognizedEvent_ = input.readBool();
1286                 bitField0_ |= 0x00000200;
1287                 break;
1288               } // case 112
1289             case 122:
1290               {
1291                 input.readMessage(getBargeInConfigFieldBuilder().getBuilder(), extensionRegistry);
1292                 bitField0_ |= 0x00000400;
1293                 break;
1294               } // case 122
1295             case 136:
1296               {
1297                 enableAutomaticPunctuation_ = input.readBool();
1298                 bitField0_ |= 0x00000800;
1299                 break;
1300               } // case 136
1301             default:
1302               {
1303                 if (!super.parseUnknownField(input, extensionRegistry, tag)) {
1304                   done = true; // was an endgroup tag
1305                 }
1306                 break;
1307               } // default:
1308           } // switch (tag)
1309         } // while (!done)
1310       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1311         throw e.unwrapIOException();
1312       } finally {
1313         onChanged();
1314       } // finally
1315       return this;
1316     }
1317 
1318     private int bitField0_;
1319 
1320     private int audioEncoding_ = 0;
1321     /**
1322      *
1323      *
1324      * <pre>
1325      * Required. Audio encoding of the audio content to process.
1326      * </pre>
1327      *
1328      * <code>.google.cloud.dialogflow.v2beta1.AudioEncoding audio_encoding = 1;</code>
1329      *
1330      * @return The enum numeric value on the wire for audioEncoding.
1331      */
1332     @java.lang.Override
1333     public int getAudioEncodingValue() {
1334       return audioEncoding_;
1335     }
1336     /**
1337      *
1338      *
1339      * <pre>
1340      * Required. Audio encoding of the audio content to process.
1341      * </pre>
1342      *
1343      * <code>.google.cloud.dialogflow.v2beta1.AudioEncoding audio_encoding = 1;</code>
1344      *
1345      * @param value The enum numeric value on the wire for audioEncoding to set.
1346      * @return This builder for chaining.
1347      */
1348     public Builder setAudioEncodingValue(int value) {
1349       audioEncoding_ = value;
1350       bitField0_ |= 0x00000001;
1351       onChanged();
1352       return this;
1353     }
1354     /**
1355      *
1356      *
1357      * <pre>
1358      * Required. Audio encoding of the audio content to process.
1359      * </pre>
1360      *
1361      * <code>.google.cloud.dialogflow.v2beta1.AudioEncoding audio_encoding = 1;</code>
1362      *
1363      * @return The audioEncoding.
1364      */
1365     @java.lang.Override
1366     public com.google.cloud.dialogflow.v2beta1.AudioEncoding getAudioEncoding() {
1367       com.google.cloud.dialogflow.v2beta1.AudioEncoding result =
1368           com.google.cloud.dialogflow.v2beta1.AudioEncoding.forNumber(audioEncoding_);
1369       return result == null
1370           ? com.google.cloud.dialogflow.v2beta1.AudioEncoding.UNRECOGNIZED
1371           : result;
1372     }
1373     /**
1374      *
1375      *
1376      * <pre>
1377      * Required. Audio encoding of the audio content to process.
1378      * </pre>
1379      *
1380      * <code>.google.cloud.dialogflow.v2beta1.AudioEncoding audio_encoding = 1;</code>
1381      *
1382      * @param value The audioEncoding to set.
1383      * @return This builder for chaining.
1384      */
1385     public Builder setAudioEncoding(com.google.cloud.dialogflow.v2beta1.AudioEncoding value) {
1386       if (value == null) {
1387         throw new NullPointerException();
1388       }
1389       bitField0_ |= 0x00000001;
1390       audioEncoding_ = value.getNumber();
1391       onChanged();
1392       return this;
1393     }
1394     /**
1395      *
1396      *
1397      * <pre>
1398      * Required. Audio encoding of the audio content to process.
1399      * </pre>
1400      *
1401      * <code>.google.cloud.dialogflow.v2beta1.AudioEncoding audio_encoding = 1;</code>
1402      *
1403      * @return This builder for chaining.
1404      */
1405     public Builder clearAudioEncoding() {
1406       bitField0_ = (bitField0_ & ~0x00000001);
1407       audioEncoding_ = 0;
1408       onChanged();
1409       return this;
1410     }
1411 
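    // Editor's note: illustrative sketch only, not part of the generated code. The two setters
    // above differ in their argument: setAudioEncoding takes the AudioEncoding enum, while
    // setAudioEncodingValue takes the raw wire number (useful when the enum constant is not
    // known at compile time). AUDIO_ENCODING_LINEAR_16 is assumed to be a value of the
    // AudioEncoding enum generated in this package.
    //
    //   InputAudioConfig.Builder builder = InputAudioConfig.newBuilder();
    //   builder.setAudioEncoding(
    //       com.google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_LINEAR_16);
    //   // Equivalent, using the numeric wire value of the same constant:
    //   builder.setAudioEncodingValue(
    //       com.google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_LINEAR_16.getNumber());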
1412     private int sampleRateHertz_;
1413     /**
1414      *
1415      *
1416      * <pre>
1417      * Required. Sample rate (in Hertz) of the audio content sent in the query.
1418      * Refer to
1419      * [Cloud Speech API
1420      * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
1421      * more details.
1422      * </pre>
1423      *
1424      * <code>int32 sample_rate_hertz = 2;</code>
1425      *
1426      * @return The sampleRateHertz.
1427      */
1428     @java.lang.Override
1429     public int getSampleRateHertz() {
1430       return sampleRateHertz_;
1431     }
1432     /**
1433      *
1434      *
1435      * <pre>
1436      * Required. Sample rate (in Hertz) of the audio content sent in the query.
1437      * Refer to
1438      * [Cloud Speech API
1439      * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
1440      * more details.
1441      * </pre>
1442      *
1443      * <code>int32 sample_rate_hertz = 2;</code>
1444      *
1445      * @param value The sampleRateHertz to set.
1446      * @return This builder for chaining.
1447      */
1448     public Builder setSampleRateHertz(int value) {
1449 
1450       sampleRateHertz_ = value;
1451       bitField0_ |= 0x00000002;
1452       onChanged();
1453       return this;
1454     }
1455     /**
1456      *
1457      *
1458      * <pre>
1459      * Required. Sample rate (in Hertz) of the audio content sent in the query.
1460      * Refer to
1461      * [Cloud Speech API
1462      * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
1463      * more details.
1464      * </pre>
1465      *
1466      * <code>int32 sample_rate_hertz = 2;</code>
1467      *
1468      * @return This builder for chaining.
1469      */
1470     public Builder clearSampleRateHertz() {
1471       bitField0_ = (bitField0_ & ~0x00000002);
1472       sampleRateHertz_ = 0;
1473       onChanged();
1474       return this;
1475     }
1476 
1477     private java.lang.Object languageCode_ = "";
1478     /**
1479      *
1480      *
1481      * <pre>
1482      * Required. The language of the supplied audio. Dialogflow does not do
1483      * translations. See [Language
1484      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1485      * for a list of the currently supported language codes. Note that queries in
1486      * the same session do not necessarily need to specify the same language.
1487      * </pre>
1488      *
1489      * <code>string language_code = 3;</code>
1490      *
1491      * @return The languageCode.
1492      */
1493     public java.lang.String getLanguageCode() {
1494       java.lang.Object ref = languageCode_;
1495       if (!(ref instanceof java.lang.String)) {
1496         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
1497         java.lang.String s = bs.toStringUtf8();
1498         languageCode_ = s;
1499         return s;
1500       } else {
1501         return (java.lang.String) ref;
1502       }
1503     }
1504     /**
1505      *
1506      *
1507      * <pre>
1508      * Required. The language of the supplied audio. Dialogflow does not do
1509      * translations. See [Language
1510      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1511      * for a list of the currently supported language codes. Note that queries in
1512      * the same session do not necessarily need to specify the same language.
1513      * </pre>
1514      *
1515      * <code>string language_code = 3;</code>
1516      *
1517      * @return The bytes for languageCode.
1518      */
1519     public com.google.protobuf.ByteString getLanguageCodeBytes() {
1520       java.lang.Object ref = languageCode_;
1521       if (ref instanceof String) {
1522         com.google.protobuf.ByteString b =
1523             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
1524         languageCode_ = b;
1525         return b;
1526       } else {
1527         return (com.google.protobuf.ByteString) ref;
1528       }
1529     }
1530     /**
1531      *
1532      *
1533      * <pre>
1534      * Required. The language of the supplied audio. Dialogflow does not do
1535      * translations. See [Language
1536      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1537      * for a list of the currently supported language codes. Note that queries in
1538      * the same session do not necessarily need to specify the same language.
1539      * </pre>
1540      *
1541      * <code>string language_code = 3;</code>
1542      *
1543      * @param value The languageCode to set.
1544      * @return This builder for chaining.
1545      */
1546     public Builder setLanguageCode(java.lang.String value) {
1547       if (value == null) {
1548         throw new NullPointerException();
1549       }
1550       languageCode_ = value;
1551       bitField0_ |= 0x00000004;
1552       onChanged();
1553       return this;
1554     }
1555     /**
1556      *
1557      *
1558      * <pre>
1559      * Required. The language of the supplied audio. Dialogflow does not do
1560      * translations. See [Language
1561      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1562      * for a list of the currently supported language codes. Note that queries in
1563      * the same session do not necessarily need to specify the same language.
1564      * </pre>
1565      *
1566      * <code>string language_code = 3;</code>
1567      *
1568      * @return This builder for chaining.
1569      */
1570     public Builder clearLanguageCode() {
1571       languageCode_ = getDefaultInstance().getLanguageCode();
1572       bitField0_ = (bitField0_ & ~0x00000004);
1573       onChanged();
1574       return this;
1575     }
1576     /**
1577      *
1578      *
1579      * <pre>
1580      * Required. The language of the supplied audio. Dialogflow does not do
1581      * translations. See [Language
1582      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1583      * for a list of the currently supported language codes. Note that queries in
1584      * the same session do not necessarily need to specify the same language.
1585      * </pre>
1586      *
1587      * <code>string language_code = 3;</code>
1588      *
1589      * @param value The bytes for languageCode to set.
1590      * @return This builder for chaining.
1591      */
1592     public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
1593       if (value == null) {
1594         throw new NullPointerException();
1595       }
1596       checkByteStringIsUtf8(value);
1597       languageCode_ = value;
1598       bitField0_ |= 0x00000004;
1599       onChanged();
1600       return this;
1601     }
1602 
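    // Editor's note: minimal sketch, not generated code, showing the three fields the comments
    // above mark as required being set together. The sample rate and language code values below
    // are placeholders and must match the audio actually sent with the query.
    //
    //   com.google.cloud.dialogflow.v2beta1.InputAudioConfig config =
    //       com.google.cloud.dialogflow.v2beta1.InputAudioConfig.newBuilder()
    //           .setAudioEncoding(
    //               com.google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_LINEAR_16)
    //           .setSampleRateHertz(16000)
    //           .setLanguageCode("en-US")
    //           .build();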
1603     private boolean enableWordInfo_;
1604     /**
1605      *
1606      *
1607      * <pre>
1608      * If `true`, Dialogflow returns
1609      * [SpeechWordInfo][google.cloud.dialogflow.v2beta1.SpeechWordInfo] in
1610      * [StreamingRecognitionResult][google.cloud.dialogflow.v2beta1.StreamingRecognitionResult]
1611      * with information about the recognized speech words, e.g. start and end time
1612      * offsets. If false or unspecified, Speech doesn't return any word-level
1613      * information.
1614      * </pre>
1615      *
1616      * <code>bool enable_word_info = 13;</code>
1617      *
1618      * @return The enableWordInfo.
1619      */
1620     @java.lang.Override
1621     public boolean getEnableWordInfo() {
1622       return enableWordInfo_;
1623     }
1624     /**
1625      *
1626      *
1627      * <pre>
1628      * If `true`, Dialogflow returns
1629      * [SpeechWordInfo][google.cloud.dialogflow.v2beta1.SpeechWordInfo] in
1630      * [StreamingRecognitionResult][google.cloud.dialogflow.v2beta1.StreamingRecognitionResult]
1631      * with information about the recognized speech words, e.g. start and end time
1632      * offsets. If false or unspecified, Speech doesn't return any word-level
1633      * information.
1634      * </pre>
1635      *
1636      * <code>bool enable_word_info = 13;</code>
1637      *
1638      * @param value The enableWordInfo to set.
1639      * @return This builder for chaining.
1640      */
1641     public Builder setEnableWordInfo(boolean value) {
1642 
1643       enableWordInfo_ = value;
1644       bitField0_ |= 0x00000008;
1645       onChanged();
1646       return this;
1647     }
1648     /**
1649      *
1650      *
1651      * <pre>
1652      * If `true`, Dialogflow returns
1653      * [SpeechWordInfo][google.cloud.dialogflow.v2beta1.SpeechWordInfo] in
1654      * [StreamingRecognitionResult][google.cloud.dialogflow.v2beta1.StreamingRecognitionResult]
1655      * with information about the recognized speech words, e.g. start and end time
1656      * offsets. If false or unspecified, Speech doesn't return any word-level
1657      * information.
1658      * </pre>
1659      *
1660      * <code>bool enable_word_info = 13;</code>
1661      *
1662      * @return This builder for chaining.
1663      */
1664     public Builder clearEnableWordInfo() {
1665       bitField0_ = (bitField0_ & ~0x00000008);
1666       enableWordInfo_ = false;
1667       onChanged();
1668       return this;
1669     }
1670 
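    // Editor's note: illustrative sketch, not generated code. Word-level timing only appears in
    // streaming recognition results, so this flag is normally combined with the streaming
    // detect-intent path; the surrounding streaming request is assumed and not shown here.
    //
    //   com.google.cloud.dialogflow.v2beta1.InputAudioConfig withWordInfo =
    //       com.google.cloud.dialogflow.v2beta1.InputAudioConfig.newBuilder()
    //           .setEnableWordInfo(true)
    //           .build();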
1671     private com.google.protobuf.LazyStringList phraseHints_ =
1672         com.google.protobuf.LazyStringArrayList.EMPTY;
1673 
1674     private void ensurePhraseHintsIsMutable() {
1675       if (!((bitField0_ & 0x00000010) != 0)) {
1676         phraseHints_ = new com.google.protobuf.LazyStringArrayList(phraseHints_);
1677         bitField0_ |= 0x00000010;
1678       }
1679     }
1680     /**
1681      *
1682      *
1683      * <pre>
1684      * A list of strings containing words and phrases that the speech
1685      * recognizer should recognize with higher likelihood.
1686      * See [the Cloud Speech
1687      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1688      * for more details.
1689      * This field is deprecated. Please use [speech_contexts]() instead. If you
1690      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1691      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1692      * </pre>
1693      *
1694      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1695      *
1696      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1697      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1698      * @return A list containing the phraseHints.
1699      */
1700     @java.lang.Deprecated
1701     public com.google.protobuf.ProtocolStringList getPhraseHintsList() {
1702       return phraseHints_.getUnmodifiableView();
1703     }
1704     /**
1705      *
1706      *
1707      * <pre>
1708      * A list of strings containing words and phrases that the speech
1709      * recognizer should recognize with higher likelihood.
1710      * See [the Cloud Speech
1711      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1712      * for more details.
1713      * This field is deprecated. Please use [speech_contexts]() instead. If you
1714      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1715      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1716      * </pre>
1717      *
1718      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1719      *
1720      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1721      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1722      * @return The count of phraseHints.
1723      */
1724     @java.lang.Deprecated
1725     public int getPhraseHintsCount() {
1726       return phraseHints_.size();
1727     }
1728     /**
1729      *
1730      *
1731      * <pre>
1732      * A list of strings containing words and phrases that the speech
1733      * recognizer should recognize with higher likelihood.
1734      * See [the Cloud Speech
1735      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1736      * for more details.
1737      * This field is deprecated. Please use [speech_contexts]() instead. If you
1738      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1739      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1740      * </pre>
1741      *
1742      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1743      *
1744      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1745      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1746      * @param index The index of the element to return.
1747      * @return The phraseHints at the given index.
1748      */
1749     @java.lang.Deprecated
1750     public java.lang.String getPhraseHints(int index) {
1751       return phraseHints_.get(index);
1752     }
1753     /**
1754      *
1755      *
1756      * <pre>
1757      * A list of strings containing words and phrases that the speech
1758      * recognizer should recognize with higher likelihood.
1759      * See [the Cloud Speech
1760      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1761      * for more details.
1762      * This field is deprecated. Please use [speech_contexts]() instead. If you
1763      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1764      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1765      * </pre>
1766      *
1767      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1768      *
1769      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1770      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1771      * @param index The index of the value to return.
1772      * @return The bytes of the phraseHints at the given index.
1773      */
1774     @java.lang.Deprecated
1775     public com.google.protobuf.ByteString getPhraseHintsBytes(int index) {
1776       return phraseHints_.getByteString(index);
1777     }
1778     /**
1779      *
1780      *
1781      * <pre>
1782      * A list of strings containing words and phrases that the speech
1783      * recognizer should recognize with higher likelihood.
1784      * See [the Cloud Speech
1785      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1786      * for more details.
1787      * This field is deprecated. Please use [speech_contexts]() instead. If you
1788      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1789      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1790      * </pre>
1791      *
1792      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1793      *
1794      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1795      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1796      * @param index The index to set the value at.
1797      * @param value The phraseHints to set.
1798      * @return This builder for chaining.
1799      */
1800     @java.lang.Deprecated
1801     public Builder setPhraseHints(int index, java.lang.String value) {
1802       if (value == null) {
1803         throw new NullPointerException();
1804       }
1805       ensurePhraseHintsIsMutable();
1806       phraseHints_.set(index, value);
1807       onChanged();
1808       return this;
1809     }
1810     /**
1811      *
1812      *
1813      * <pre>
1814      * A list of strings containing words and phrases that the speech
1815      * recognizer should recognize with higher likelihood.
1816      * See [the Cloud Speech
1817      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1818      * for more details.
1819      * This field is deprecated. Please use [speech_contexts]() instead. If you
1820      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1821      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1822      * </pre>
1823      *
1824      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1825      *
1826      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1827      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1828      * @param value The phraseHints to add.
1829      * @return This builder for chaining.
1830      */
1831     @java.lang.Deprecated
1832     public Builder addPhraseHints(java.lang.String value) {
1833       if (value == null) {
1834         throw new NullPointerException();
1835       }
1836       ensurePhraseHintsIsMutable();
1837       phraseHints_.add(value);
1838       onChanged();
1839       return this;
1840     }
1841     /**
1842      *
1843      *
1844      * <pre>
1845      * A list of strings containing words and phrases that the speech
1846      * recognizer should recognize with higher likelihood.
1847      * See [the Cloud Speech
1848      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1849      * for more details.
1850      * This field is deprecated. Please use [speech_contexts]() instead. If you
1851      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1852      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1853      * </pre>
1854      *
1855      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1856      *
1857      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1858      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1859      * @param values The phraseHints to add.
1860      * @return This builder for chaining.
1861      */
1862     @java.lang.Deprecated
1863     public Builder addAllPhraseHints(java.lang.Iterable<java.lang.String> values) {
1864       ensurePhraseHintsIsMutable();
1865       com.google.protobuf.AbstractMessageLite.Builder.addAll(values, phraseHints_);
1866       onChanged();
1867       return this;
1868     }
1869     /**
1870      *
1871      *
1872      * <pre>
1873      * A list of strings containing words and phrases that the speech
1874      * recognizer should recognize with higher likelihood.
1875      * See [the Cloud Speech
1876      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1877      * for more details.
1878      * This field is deprecated. Please use [speech_contexts]() instead. If you
1879      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1880      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1881      * </pre>
1882      *
1883      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1884      *
1885      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1886      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1887      * @return This builder for chaining.
1888      */
1889     @java.lang.Deprecated
1890     public Builder clearPhraseHints() {
1891       phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
1892       bitField0_ = (bitField0_ & ~0x00000010);
1893       onChanged();
1894       return this;
1895     }
1896     /**
1897      *
1898      *
1899      * <pre>
1900      * A list of strings containing words and phrases that the speech
1901      * recognizer should recognize with higher likelihood.
1902      * See [the Cloud Speech
1903      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1904      * for more details.
1905      * This field is deprecated. Please use [speech_contexts]() instead. If you
1906      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1907      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1908      * </pre>
1909      *
1910      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1911      *
1912      * @deprecated google.cloud.dialogflow.v2beta1.InputAudioConfig.phrase_hints is deprecated. See
1913      *     google/cloud/dialogflow/v2beta1/audio_config.proto;l=266
1914      * @param value The bytes of the phraseHints to add.
1915      * @return This builder for chaining.
1916      */
1917     @java.lang.Deprecated
1918     public Builder addPhraseHintsBytes(com.google.protobuf.ByteString value) {
1919       if (value == null) {
1920         throw new NullPointerException();
1921       }
1922       checkByteStringIsUtf8(value);
1923       ensurePhraseHintsIsMutable();
1924       phraseHints_.add(value);
1925       onChanged();
1926       return this;
1927     }
1928 
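    // Editor's note: hedged migration sketch, not generated code. Because phrase_hints is
    // deprecated in favor of speech_contexts, a list of hint strings can instead be wrapped in a
    // single SpeechContext. This assumes SpeechContext generates addAllPhrases(...) for its
    // repeated `phrases` field, and `builder` names an InputAudioConfig.Builder from newBuilder().
    //
    //   java.util.List<String> hints = java.util.Arrays.asList("book a room", "check out");
    //   builder.addSpeechContexts(
    //       com.google.cloud.dialogflow.v2beta1.SpeechContext.newBuilder()
    //           .addAllPhrases(hints)
    //           .build());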
1929     private java.util.List<com.google.cloud.dialogflow.v2beta1.SpeechContext> speechContexts_ =
1930         java.util.Collections.emptyList();
1931 
1932     private void ensureSpeechContextsIsMutable() {
1933       if (!((bitField0_ & 0x00000020) != 0)) {
1934         speechContexts_ =
1935             new java.util.ArrayList<com.google.cloud.dialogflow.v2beta1.SpeechContext>(
1936                 speechContexts_);
1937         bitField0_ |= 0x00000020;
1938       }
1939     }
1940 
1941     private com.google.protobuf.RepeatedFieldBuilderV3<
1942             com.google.cloud.dialogflow.v2beta1.SpeechContext,
1943             com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder,
1944             com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>
1945         speechContextsBuilder_;
1946 
1947     /**
1948      *
1949      *
1950      * <pre>
1951      * Context information to assist speech recognition.
1952      * See [the Cloud Speech
1953      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1954      * for more details.
1955      * </pre>
1956      *
1957      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
1958      */
1959     public java.util.List<com.google.cloud.dialogflow.v2beta1.SpeechContext>
1960         getSpeechContextsList() {
1961       if (speechContextsBuilder_ == null) {
1962         return java.util.Collections.unmodifiableList(speechContexts_);
1963       } else {
1964         return speechContextsBuilder_.getMessageList();
1965       }
1966     }
1967     /**
1968      *
1969      *
1970      * <pre>
1971      * Context information to assist speech recognition.
1972      * See [the Cloud Speech
1973      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1974      * for more details.
1975      * </pre>
1976      *
1977      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
1978      */
1979     public int getSpeechContextsCount() {
1980       if (speechContextsBuilder_ == null) {
1981         return speechContexts_.size();
1982       } else {
1983         return speechContextsBuilder_.getCount();
1984       }
1985     }
1986     /**
1987      *
1988      *
1989      * <pre>
1990      * Context information to assist speech recognition.
1991      * See [the Cloud Speech
1992      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1993      * for more details.
1994      * </pre>
1995      *
1996      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
1997      */
1998     public com.google.cloud.dialogflow.v2beta1.SpeechContext getSpeechContexts(int index) {
1999       if (speechContextsBuilder_ == null) {
2000         return speechContexts_.get(index);
2001       } else {
2002         return speechContextsBuilder_.getMessage(index);
2003       }
2004     }
2005     /**
2006      *
2007      *
2008      * <pre>
2009      * Context information to assist speech recognition.
2010      * See [the Cloud Speech
2011      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2012      * for more details.
2013      * </pre>
2014      *
2015      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2016      */
2017     public Builder setSpeechContexts(
2018         int index, com.google.cloud.dialogflow.v2beta1.SpeechContext value) {
2019       if (speechContextsBuilder_ == null) {
2020         if (value == null) {
2021           throw new NullPointerException();
2022         }
2023         ensureSpeechContextsIsMutable();
2024         speechContexts_.set(index, value);
2025         onChanged();
2026       } else {
2027         speechContextsBuilder_.setMessage(index, value);
2028       }
2029       return this;
2030     }
2031     /**
2032      *
2033      *
2034      * <pre>
2035      * Context information to assist speech recognition.
2036      * See [the Cloud Speech
2037      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2038      * for more details.
2039      * </pre>
2040      *
2041      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2042      */
2043     public Builder setSpeechContexts(
2044         int index, com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder builderForValue) {
2045       if (speechContextsBuilder_ == null) {
2046         ensureSpeechContextsIsMutable();
2047         speechContexts_.set(index, builderForValue.build());
2048         onChanged();
2049       } else {
2050         speechContextsBuilder_.setMessage(index, builderForValue.build());
2051       }
2052       return this;
2053     }
2054     /**
2055      *
2056      *
2057      * <pre>
2058      * Context information to assist speech recognition.
2059      * See [the Cloud Speech
2060      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2061      * for more details.
2062      * </pre>
2063      *
2064      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2065      */
2066     public Builder addSpeechContexts(com.google.cloud.dialogflow.v2beta1.SpeechContext value) {
2067       if (speechContextsBuilder_ == null) {
2068         if (value == null) {
2069           throw new NullPointerException();
2070         }
2071         ensureSpeechContextsIsMutable();
2072         speechContexts_.add(value);
2073         onChanged();
2074       } else {
2075         speechContextsBuilder_.addMessage(value);
2076       }
2077       return this;
2078     }
2079     /**
2080      *
2081      *
2082      * <pre>
2083      * Context information to assist speech recognition.
2084      * See [the Cloud Speech
2085      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2086      * for more details.
2087      * </pre>
2088      *
2089      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2090      */
2091     public Builder addSpeechContexts(
2092         int index, com.google.cloud.dialogflow.v2beta1.SpeechContext value) {
2093       if (speechContextsBuilder_ == null) {
2094         if (value == null) {
2095           throw new NullPointerException();
2096         }
2097         ensureSpeechContextsIsMutable();
2098         speechContexts_.add(index, value);
2099         onChanged();
2100       } else {
2101         speechContextsBuilder_.addMessage(index, value);
2102       }
2103       return this;
2104     }
2105     /**
2106      *
2107      *
2108      * <pre>
2109      * Context information to assist speech recognition.
2110      * See [the Cloud Speech
2111      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2112      * for more details.
2113      * </pre>
2114      *
2115      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2116      */
2117     public Builder addSpeechContexts(
2118         com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder builderForValue) {
2119       if (speechContextsBuilder_ == null) {
2120         ensureSpeechContextsIsMutable();
2121         speechContexts_.add(builderForValue.build());
2122         onChanged();
2123       } else {
2124         speechContextsBuilder_.addMessage(builderForValue.build());
2125       }
2126       return this;
2127     }
2128     /**
2129      *
2130      *
2131      * <pre>
2132      * Context information to assist speech recognition.
2133      * See [the Cloud Speech
2134      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2135      * for more details.
2136      * </pre>
2137      *
2138      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2139      */
2140     public Builder addSpeechContexts(
2141         int index, com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder builderForValue) {
2142       if (speechContextsBuilder_ == null) {
2143         ensureSpeechContextsIsMutable();
2144         speechContexts_.add(index, builderForValue.build());
2145         onChanged();
2146       } else {
2147         speechContextsBuilder_.addMessage(index, builderForValue.build());
2148       }
2149       return this;
2150     }
2151     /**
2152      *
2153      *
2154      * <pre>
2155      * Context information to assist speech recognition.
2156      * See [the Cloud Speech
2157      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2158      * for more details.
2159      * </pre>
2160      *
2161      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2162      */
2163     public Builder addAllSpeechContexts(
2164         java.lang.Iterable<? extends com.google.cloud.dialogflow.v2beta1.SpeechContext> values) {
2165       if (speechContextsBuilder_ == null) {
2166         ensureSpeechContextsIsMutable();
2167         com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_);
2168         onChanged();
2169       } else {
2170         speechContextsBuilder_.addAllMessages(values);
2171       }
2172       return this;
2173     }
2174     /**
2175      *
2176      *
2177      * <pre>
2178      * Context information to assist speech recognition.
2179      * See [the Cloud Speech
2180      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2181      * for more details.
2182      * </pre>
2183      *
2184      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2185      */
2186     public Builder clearSpeechContexts() {
2187       if (speechContextsBuilder_ == null) {
2188         speechContexts_ = java.util.Collections.emptyList();
2189         bitField0_ = (bitField0_ & ~0x00000020);
2190         onChanged();
2191       } else {
2192         speechContextsBuilder_.clear();
2193       }
2194       return this;
2195     }
2196     /**
2197      *
2198      *
2199      * <pre>
2200      * Context information to assist speech recognition.
2201      * See [the Cloud Speech
2202      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2203      * for more details.
2204      * </pre>
2205      *
2206      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2207      */
2208     public Builder removeSpeechContexts(int index) {
2209       if (speechContextsBuilder_ == null) {
2210         ensureSpeechContextsIsMutable();
2211         speechContexts_.remove(index);
2212         onChanged();
2213       } else {
2214         speechContextsBuilder_.remove(index);
2215       }
2216       return this;
2217     }
2218     /**
2219      *
2220      *
2221      * <pre>
2222      * Context information to assist speech recognition.
2223      * See [the Cloud Speech
2224      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2225      * for more details.
2226      * </pre>
2227      *
2228      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2229      */
2230     public com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder getSpeechContextsBuilder(
2231         int index) {
2232       return getSpeechContextsFieldBuilder().getBuilder(index);
2233     }
2234     /**
2235      *
2236      *
2237      * <pre>
2238      * Context information to assist speech recognition.
2239      * See [the Cloud Speech
2240      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2241      * for more details.
2242      * </pre>
2243      *
2244      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2245      */
2246     public com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
2247         int index) {
2248       if (speechContextsBuilder_ == null) {
2249         return speechContexts_.get(index);
2250       } else {
2251         return speechContextsBuilder_.getMessageOrBuilder(index);
2252       }
2253     }
2254     /**
2255      *
2256      *
2257      * <pre>
2258      * Context information to assist speech recognition.
2259      * See [the Cloud Speech
2260      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2261      * for more details.
2262      * </pre>
2263      *
2264      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2265      */
2266     public java.util.List<? extends com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>
2267         getSpeechContextsOrBuilderList() {
2268       if (speechContextsBuilder_ != null) {
2269         return speechContextsBuilder_.getMessageOrBuilderList();
2270       } else {
2271         return java.util.Collections.unmodifiableList(speechContexts_);
2272       }
2273     }
2274     /**
2275      *
2276      *
2277      * <pre>
2278      * Context information to assist speech recognition.
2279      * See [the Cloud Speech
2280      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2281      * for more details.
2282      * </pre>
2283      *
2284      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2285      */
2286     public com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder addSpeechContextsBuilder() {
2287       return getSpeechContextsFieldBuilder()
2288           .addBuilder(com.google.cloud.dialogflow.v2beta1.SpeechContext.getDefaultInstance());
2289     }
2290     /**
2291      *
2292      *
2293      * <pre>
2294      * Context information to assist speech recognition.
2295      * See [the Cloud Speech
2296      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2297      * for more details.
2298      * </pre>
2299      *
2300      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2301      */
2302     public com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder addSpeechContextsBuilder(
2303         int index) {
2304       return getSpeechContextsFieldBuilder()
2305           .addBuilder(
2306               index, com.google.cloud.dialogflow.v2beta1.SpeechContext.getDefaultInstance());
2307     }
2308     /**
2309      *
2310      *
2311      * <pre>
2312      * Context information to assist speech recognition.
2313      * See [the Cloud Speech
2314      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2315      * for more details.
2316      * </pre>
2317      *
2318      * <code>repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;</code>
2319      */
2320     public java.util.List<com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder>
2321         getSpeechContextsBuilderList() {
2322       return getSpeechContextsFieldBuilder().getBuilderList();
2323     }
2324 
2325     private com.google.protobuf.RepeatedFieldBuilderV3<
2326             com.google.cloud.dialogflow.v2beta1.SpeechContext,
2327             com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder,
2328             com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>
2329         getSpeechContextsFieldBuilder() {
2330       if (speechContextsBuilder_ == null) {
2331         speechContextsBuilder_ =
2332             new com.google.protobuf.RepeatedFieldBuilderV3<
2333                 com.google.cloud.dialogflow.v2beta1.SpeechContext,
2334                 com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder,
2335                 com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>(
2336                 speechContexts_,
2337                 ((bitField0_ & 0x00000020) != 0),
2338                 getParentForChildren(),
2339                 isClean());
2340         speechContexts_ = null;
2341       }
2342       return speechContextsBuilder_;
2343     }
2344 
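    // Editor's note: illustrative sketch, not generated code. A speech context biases recognition
    // toward the supplied phrases. This assumes SpeechContext also carries an optional `boost`
    // field with a generated setBoost(float), per the public v2beta1 proto, and `builder` again
    // names an InputAudioConfig.Builder.
    //
    //   builder.addSpeechContexts(
    //       com.google.cloud.dialogflow.v2beta1.SpeechContext.newBuilder()
    //           .addPhrases("order number")
    //           .setBoost(10.0f)
    //           .build());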
2345     private java.lang.Object model_ = "";
2346     /**
2347      *
2348      *
2349      * <pre>
2350      * Which Speech model to select for the given request. Select the
2351      * model best suited to your domain to get best results. If a model is not
2352      * explicitly specified, then we auto-select a model based on the parameters
2353      * in the InputAudioConfig.
2354      * If enhanced speech model is enabled for the agent and an enhanced
2355      * version of the specified model for the language does not exist, then the
2356      * speech is recognized using the standard version of the specified model.
2357      * Refer to
2358      * [Cloud Speech API
2359      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2360      * for more details.
2361      * If you specify a model, the following models typically have the best
2362      * performance:
2363      * - phone_call (best for Agent Assist and telephony)
2364      * - latest_short (best for Dialogflow non-telephony)
2365      * - command_and_search (best for very short utterances and commands)
2366      * </pre>
2367      *
2368      * <code>string model = 7;</code>
2369      *
2370      * @return The model.
2371      */
2372     public java.lang.String getModel() {
2373       java.lang.Object ref = model_;
2374       if (!(ref instanceof java.lang.String)) {
2375         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
2376         java.lang.String s = bs.toStringUtf8();
2377         model_ = s;
2378         return s;
2379       } else {
2380         return (java.lang.String) ref;
2381       }
2382     }
2383     /**
2384      *
2385      *
2386      * <pre>
2387      * Which Speech model to select for the given request. Select the
2388      * model best suited to your domain to get best results. If a model is not
2389      * explicitly specified, then we auto-select a model based on the parameters
2390      * in the InputAudioConfig.
2391      * If enhanced speech model is enabled for the agent and an enhanced
2392      * version of the specified model for the language does not exist, then the
2393      * speech is recognized using the standard version of the specified model.
2394      * Refer to
2395      * [Cloud Speech API
2396      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2397      * for more details.
2398      * If you specify a model, the following models typically have the best
2399      * performance:
2400      * - phone_call (best for Agent Assist and telephony)
2401      * - latest_short (best for Dialogflow non-telephony)
2402      * - command_and_search (best for very short utterances and commands)
2403      * </pre>
2404      *
2405      * <code>string model = 7;</code>
2406      *
2407      * @return The bytes for model.
2408      */
2409     public com.google.protobuf.ByteString getModelBytes() {
2410       java.lang.Object ref = model_;
2411       if (ref instanceof String) {
2412         com.google.protobuf.ByteString b =
2413             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
2414         model_ = b;
2415         return b;
2416       } else {
2417         return (com.google.protobuf.ByteString) ref;
2418       }
2419     }
2420     /**
2421      *
2422      *
2423      * <pre>
2424      * Which Speech model to select for the given request. Select the
2425      * model best suited to your domain to get best results. If a model is not
2426      * explicitly specified, then we auto-select a model based on the parameters
2427      * in the InputAudioConfig.
2428      * If enhanced speech model is enabled for the agent and an enhanced
2429      * version of the specified model for the language does not exist, then the
2430      * speech is recognized using the standard version of the specified model.
2431      * Refer to
2432      * [Cloud Speech API
2433      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2434      * for more details.
2435      * If you specify a model, the following models typically have the best
2436      * performance:
2437      * - phone_call (best for Agent Assist and telephony)
2438      * - latest_short (best for Dialogflow non-telephony)
2439      * - command_and_search (best for very short utterances and commands)
2440      * </pre>
2441      *
2442      * <code>string model = 7;</code>
2443      *
2444      * @param value The model to set.
2445      * @return This builder for chaining.
2446      */
2447     public Builder setModel(java.lang.String value) {
2448       if (value == null) {
2449         throw new NullPointerException();
2450       }
2451       model_ = value;
2452       bitField0_ |= 0x00000040;
2453       onChanged();
2454       return this;
2455     }
2456     /**
2457      *
2458      *
2459      * <pre>
2460      * Which Speech model to select for the given request. Select the
2461      * model best suited to your domain to get best results. If a model is not
2462      * explicitly specified, then we auto-select a model based on the parameters
2463      * in the InputAudioConfig.
2464      * If enhanced speech model is enabled for the agent and an enhanced
2465      * version of the specified model for the language does not exist, then the
2466      * speech is recognized using the standard version of the specified model.
2467      * Refer to
2468      * [Cloud Speech API
2469      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2470      * for more details.
2471      * If you specify a model, the following models typically have the best
2472      * performance:
2473      * - phone_call (best for Agent Assist and telephony)
2474      * - latest_short (best for Dialogflow non-telephony)
2475      * - command_and_search (best for very short utterances and commands)
2476      * </pre>
2477      *
2478      * <code>string model = 7;</code>
2479      *
2480      * @return This builder for chaining.
2481      */
2482     public Builder clearModel() {
2483       model_ = getDefaultInstance().getModel();
2484       bitField0_ = (bitField0_ & ~0x00000040);
2485       onChanged();
2486       return this;
2487     }
2488     /**
2489      *
2490      *
2491      * <pre>
2492      * Which Speech model to select for the given request. Select the
2493      * model best suited to your domain to get best results. If a model is not
2494      * explicitly specified, then we auto-select a model based on the parameters
2495      * in the InputAudioConfig.
2496      * If enhanced speech model is enabled for the agent and an enhanced
2497      * version of the specified model for the language does not exist, then the
2498      * speech is recognized using the standard version of the specified model.
2499      * Refer to
2500      * [Cloud Speech API
2501      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2502      * for more details.
2503      * If you specify a model, the following models typically have the best
2504      * performance:
2505      * - phone_call (best for Agent Assist and telephony)
2506      * - latest_short (best for Dialogflow non-telephony)
2507      * - command_and_search (best for very short utterances and commands)
2508      * </pre>
2509      *
2510      * <code>string model = 7;</code>
2511      *
2512      * @param value The bytes for model to set.
2513      * @return This builder for chaining.
2514      */
2515     public Builder setModelBytes(com.google.protobuf.ByteString value) {
2516       if (value == null) {
2517         throw new NullPointerException();
2518       }
2519       checkByteStringIsUtf8(value);
2520       model_ = value;
2521       bitField0_ |= 0x00000040;
2522       onChanged();
2523       return this;
2524     }
2525 
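    // Editor's note: illustrative sketch, not generated code. The model is passed as a plain
    // string; "phone_call" is one of the models the comment above recommends, and the choice only
    // takes effect if that model exists for the configured language. `builder` is assumed to be
    // an InputAudioConfig.Builder.
    //
    //   builder.setModel("phone_call");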
2526     private int modelVariant_ = 0;
2527     /**
2528      *
2529      *
2530      * <pre>
2531      * Which variant of the [Speech
2532      * model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
2533      * </pre>
2534      *
2535      * <code>.google.cloud.dialogflow.v2beta1.SpeechModelVariant model_variant = 10;</code>
2536      *
2537      * @return The enum numeric value on the wire for modelVariant.
2538      */
2539     @java.lang.Override
2540     public int getModelVariantValue() {
2541       return modelVariant_;
2542     }
2543     /**
2544      *
2545      *
2546      * <pre>
2547      * Which variant of the [Speech
2548      * model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
2549      * </pre>
2550      *
2551      * <code>.google.cloud.dialogflow.v2beta1.SpeechModelVariant model_variant = 10;</code>
2552      *
2553      * @param value The enum numeric value on the wire for modelVariant to set.
2554      * @return This builder for chaining.
2555      */
2556     public Builder setModelVariantValue(int value) {
2557       modelVariant_ = value;
2558       bitField0_ |= 0x00000080;
2559       onChanged();
2560       return this;
2561     }
2562     /**
2563      *
2564      *
2565      * <pre>
2566      * Which variant of the [Speech
2567      * model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
2568      * </pre>
2569      *
2570      * <code>.google.cloud.dialogflow.v2beta1.SpeechModelVariant model_variant = 10;</code>
2571      *
2572      * @return The modelVariant.
2573      */
2574     @java.lang.Override
2575     public com.google.cloud.dialogflow.v2beta1.SpeechModelVariant getModelVariant() {
2576       com.google.cloud.dialogflow.v2beta1.SpeechModelVariant result =
2577           com.google.cloud.dialogflow.v2beta1.SpeechModelVariant.forNumber(modelVariant_);
2578       return result == null
2579           ? com.google.cloud.dialogflow.v2beta1.SpeechModelVariant.UNRECOGNIZED
2580           : result;
2581     }
2582     /**
2583      *
2584      *
2585      * <pre>
2586      * Which variant of the [Speech
2587      * model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
2588      * </pre>
2589      *
2590      * <code>.google.cloud.dialogflow.v2beta1.SpeechModelVariant model_variant = 10;</code>
2591      *
2592      * @param value The modelVariant to set.
2593      * @return This builder for chaining.
2594      */
2595     public Builder setModelVariant(com.google.cloud.dialogflow.v2beta1.SpeechModelVariant value) {
2596       if (value == null) {
2597         throw new NullPointerException();
2598       }
2599       bitField0_ |= 0x00000080;
2600       modelVariant_ = value.getNumber();
2601       onChanged();
2602       return this;
2603     }
2604     /**
2605      *
2606      *
2607      * <pre>
2608      * Which variant of the [Speech
2609      * model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
2610      * </pre>
2611      *
2612      * <code>.google.cloud.dialogflow.v2beta1.SpeechModelVariant model_variant = 10;</code>
2613      *
2614      * @return This builder for chaining.
2615      */
2616     public Builder clearModelVariant() {
2617       bitField0_ = (bitField0_ & ~0x00000080);
2618       modelVariant_ = 0;
2619       onChanged();
2620       return this;
2621     }
2622 
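    // Editor's note: illustrative sketch, not generated code. USE_ENHANCED is assumed to be a
    // value of the SpeechModelVariant enum in this package; it requests the enhanced variant of
    // whichever model is selected above, with fallback behavior as documented by the service.
    //
    //   builder.setModelVariant(
    //       com.google.cloud.dialogflow.v2beta1.SpeechModelVariant.USE_ENHANCED);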
2623     private boolean singleUtterance_;
2624     /**
2625      *
2626      *
2627      * <pre>
2628      * If `false` (default), recognition does not cease until the
2629      * client closes the stream.
2630      * If `true`, the recognizer will detect a single spoken utterance in input
2631      * audio. Recognition ceases when it detects the audio's voice has
2632      * stopped or paused. In this case, once a detected intent is received, the
2633      * client should close the stream and start a new request with a new stream as
2634      * needed.
2635      * Note: This setting is relevant only for streaming methods.
2636      * Note: When specified, InputAudioConfig.single_utterance takes precedence
2637      * over StreamingDetectIntentRequest.single_utterance.
2638      * </pre>
2639      *
2640      * <code>bool single_utterance = 8;</code>
2641      *
2642      * @return The singleUtterance.
2643      */
2644     @java.lang.Override
2645     public boolean getSingleUtterance() {
2646       return singleUtterance_;
2647     }
2648     /**
2649      *
2650      *
2651      * <pre>
2652      * If `false` (default), recognition does not cease until the
2653      * client closes the stream.
2654      * If `true`, the recognizer will detect a single spoken utterance in input
2655      * audio. Recognition ceases when it detects the audio's voice has
2656      * stopped or paused. In this case, once a detected intent is received, the
2657      * client should close the stream and start a new request with a new stream as
2658      * needed.
2659      * Note: This setting is relevant only for streaming methods.
2660      * Note: When specified, InputAudioConfig.single_utterance takes precedence
2661      * over StreamingDetectIntentRequest.single_utterance.
2662      * </pre>
2663      *
2664      * <code>bool single_utterance = 8;</code>
2665      *
2666      * @param value The singleUtterance to set.
2667      * @return This builder for chaining.
2668      */
2669     public Builder setSingleUtterance(boolean value) {
2670 
2671       singleUtterance_ = value;
2672       bitField0_ |= 0x00000100;
2673       onChanged();
2674       return this;
2675     }
2676     /**
2677      *
2678      *
2679      * <pre>
2680      * If `false` (default), recognition does not cease until the
2681      * client closes the stream.
2682      * If `true`, the recognizer will detect a single spoken utterance in input
2683      * audio. Recognition ceases when it detects the audio's voice has
2684      * stopped or paused. In this case, once a detected intent is received, the
2685      * client should close the stream and start a new request with a new stream as
2686      * needed.
2687      * Note: This setting is relevant only for streaming methods.
2688      * Note: When specified, InputAudioConfig.single_utterance takes precedence
2689      * over StreamingDetectIntentRequest.single_utterance.
2690      * </pre>
2691      *
2692      * <code>bool single_utterance = 8;</code>
2693      *
2694      * @return This builder for chaining.
2695      */
2696     public Builder clearSingleUtterance() {
2697       bitField0_ = (bitField0_ & ~0x00000100);
2698       singleUtterance_ = false;
2699       onChanged();
2700       return this;
2701     }
2702 
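    // Editor's note: illustrative sketch, not generated code. single_utterance only matters for
    // the streaming methods, and when set here it takes precedence over the corresponding flag on
    // StreamingDetectIntentRequest, so streaming callers usually set it in one place only.
    //
    //   builder.setSingleUtterance(true);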
2703     private boolean disableNoSpeechRecognizedEvent_;
2704     /**
2705      *
2706      *
2707      * <pre>
2708      * Only used in
2709      * [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]
2710      * and
2711      * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent].
2712      * If `false` and recognition doesn't return any result, trigger
2713      * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
2714      * </pre>
2715      *
2716      * <code>bool disable_no_speech_recognized_event = 14;</code>
2717      *
2718      * @return The disableNoSpeechRecognizedEvent.
2719      */
2720     @java.lang.Override
2721     public boolean getDisableNoSpeechRecognizedEvent() {
2722       return disableNoSpeechRecognizedEvent_;
2723     }
2724     /**
2725      *
2726      *
2727      * <pre>
2728      * Only used in
2729      * [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]
2730      * and
2731      * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent].
2732      * If `false` and recognition doesn't return any result, trigger
2733      * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
2734      * </pre>
2735      *
2736      * <code>bool disable_no_speech_recognized_event = 14;</code>
2737      *
2738      * @param value The disableNoSpeechRecognizedEvent to set.
2739      * @return This builder for chaining.
2740      */
setDisableNoSpeechRecognizedEvent(boolean value)2741     public Builder setDisableNoSpeechRecognizedEvent(boolean value) {
2742 
2743       disableNoSpeechRecognizedEvent_ = value;
2744       bitField0_ |= 0x00000200;
2745       onChanged();
2746       return this;
2747     }
2748     /**
2749      *
2750      *
2751      * <pre>
2752      * Only used in
2753      * [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]
2754      * and
2755      * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent].
2756      * If `false` and recognition doesn't return any result, trigger
2757      * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
2758      * </pre>
2759      *
2760      * <code>bool disable_no_speech_recognized_event = 14;</code>
2761      *
2762      * @return This builder for chaining.
2763      */
clearDisableNoSpeechRecognizedEvent()2764     public Builder clearDisableNoSpeechRecognizedEvent() {
2765       bitField0_ = (bitField0_ & ~0x00000200);
2766       disableNoSpeechRecognizedEvent_ = false;
2767       onChanged();
2768       return this;
2769     }
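
    // Hedged sketch, not generated code: suppressing the NO_SPEECH_RECOGNIZED
    // event for Participants.AnalyzeContent / StreamingAnalyzeContent. The
    // surrounding request wiring is assumed and omitted here.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setDisableNoSpeechRecognizedEvent(true)
    //           .build();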

    private com.google.cloud.dialogflow.v2beta1.BargeInConfig bargeInConfig_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.dialogflow.v2beta1.BargeInConfig,
            com.google.cloud.dialogflow.v2beta1.BargeInConfig.Builder,
            com.google.cloud.dialogflow.v2beta1.BargeInConfigOrBuilder>
        bargeInConfigBuilder_;
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     *
     * @return Whether the bargeInConfig field is set.
     */
    public boolean hasBargeInConfig() {
      return ((bitField0_ & 0x00000400) != 0);
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     *
     * @return The bargeInConfig.
     */
    public com.google.cloud.dialogflow.v2beta1.BargeInConfig getBargeInConfig() {
      if (bargeInConfigBuilder_ == null) {
        return bargeInConfig_ == null
            ? com.google.cloud.dialogflow.v2beta1.BargeInConfig.getDefaultInstance()
            : bargeInConfig_;
      } else {
        return bargeInConfigBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder setBargeInConfig(com.google.cloud.dialogflow.v2beta1.BargeInConfig value) {
      if (bargeInConfigBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        bargeInConfig_ = value;
      } else {
        bargeInConfigBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000400;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder setBargeInConfig(
        com.google.cloud.dialogflow.v2beta1.BargeInConfig.Builder builderForValue) {
      if (bargeInConfigBuilder_ == null) {
        bargeInConfig_ = builderForValue.build();
      } else {
        bargeInConfigBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000400;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder mergeBargeInConfig(com.google.cloud.dialogflow.v2beta1.BargeInConfig value) {
      if (bargeInConfigBuilder_ == null) {
        if (((bitField0_ & 0x00000400) != 0)
            && bargeInConfig_ != null
            && bargeInConfig_
                != com.google.cloud.dialogflow.v2beta1.BargeInConfig.getDefaultInstance()) {
          getBargeInConfigBuilder().mergeFrom(value);
        } else {
          bargeInConfig_ = value;
        }
      } else {
        bargeInConfigBuilder_.mergeFrom(value);
      }
      bitField0_ |= 0x00000400;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder clearBargeInConfig() {
      bitField0_ = (bitField0_ & ~0x00000400);
      bargeInConfig_ = null;
      if (bargeInConfigBuilder_ != null) {
        bargeInConfigBuilder_.dispose();
        bargeInConfigBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public com.google.cloud.dialogflow.v2beta1.BargeInConfig.Builder getBargeInConfigBuilder() {
      bitField0_ |= 0x00000400;
      onChanged();
      return getBargeInConfigFieldBuilder().getBuilder();
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public com.google.cloud.dialogflow.v2beta1.BargeInConfigOrBuilder getBargeInConfigOrBuilder() {
      if (bargeInConfigBuilder_ != null) {
        return bargeInConfigBuilder_.getMessageOrBuilder();
      } else {
        return bargeInConfig_ == null
            ? com.google.cloud.dialogflow.v2beta1.BargeInConfig.getDefaultInstance()
            : bargeInConfig_;
      }
    }
    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2beta1.BargeInConfig barge_in_config = 15;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.dialogflow.v2beta1.BargeInConfig,
            com.google.cloud.dialogflow.v2beta1.BargeInConfig.Builder,
            com.google.cloud.dialogflow.v2beta1.BargeInConfigOrBuilder>
        getBargeInConfigFieldBuilder() {
      if (bargeInConfigBuilder_ == null) {
        bargeInConfigBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.dialogflow.v2beta1.BargeInConfig,
                com.google.cloud.dialogflow.v2beta1.BargeInConfig.Builder,
                com.google.cloud.dialogflow.v2beta1.BargeInConfigOrBuilder>(
                getBargeInConfig(), getParentForChildren(), isClean());
        bargeInConfig_ = null;
      }
      return bargeInConfigBuilder_;
    }
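
    // Hedged sketch, not generated code: attaching a barge-in configuration via
    // the Builder overload declared above. BargeInConfig's own fields live in its
    // generated class and are not assumed here.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setBargeInConfig(
    //               com.google.cloud.dialogflow.v2beta1.BargeInConfig.newBuilder())
    //           .build();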

    private boolean enableAutomaticPunctuation_;
    /**
     *
     *
     * <pre>
     * Enable automatic punctuation option at the speech backend.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 17;</code>
     *
     * @return The enableAutomaticPunctuation.
     */
    @java.lang.Override
    public boolean getEnableAutomaticPunctuation() {
      return enableAutomaticPunctuation_;
    }
    /**
     *
     *
     * <pre>
     * Enable automatic punctuation option at the speech backend.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 17;</code>
     *
     * @param value The enableAutomaticPunctuation to set.
     * @return This builder for chaining.
     */
    public Builder setEnableAutomaticPunctuation(boolean value) {

      enableAutomaticPunctuation_ = value;
      bitField0_ |= 0x00000800;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Enable automatic punctuation option at the speech backend.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 17;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableAutomaticPunctuation() {
      bitField0_ = (bitField0_ & ~0x00000800);
      enableAutomaticPunctuation_ = false;
      onChanged();
      return this;
    }
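
    // Hedged sketch, not generated code: combining the boolean options managed by
    // this builder; the values are illustrative.
    //
    //   InputAudioConfig config =
    //       InputAudioConfig.newBuilder()
    //           .setSingleUtterance(true)
    //           .setEnableAutomaticPunctuation(true)
    //           .build();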

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.dialogflow.v2beta1.InputAudioConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.InputAudioConfig)
  private static final com.google.cloud.dialogflow.v2beta1.InputAudioConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.dialogflow.v2beta1.InputAudioConfig();
  }

  public static com.google.cloud.dialogflow.v2beta1.InputAudioConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<InputAudioConfig> PARSER =
      new com.google.protobuf.AbstractParser<InputAudioConfig>() {
        @java.lang.Override
        public InputAudioConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<InputAudioConfig> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<InputAudioConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.dialogflow.v2beta1.InputAudioConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
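
// Hedged usage sketch, not part of the generated file: round-tripping an
// InputAudioConfig through the generated parser. Parser#parseFrom(byte[]) is the
// standard protobuf-java API and throws InvalidProtocolBufferException; the field
// value below is illustrative.
//
//   InputAudioConfig original =
//       InputAudioConfig.newBuilder().setEnableAutomaticPunctuation(true).build();
//   byte[] bytes = original.toByteArray();
//   InputAudioConfig parsed = InputAudioConfig.parser().parseFrom(bytes);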