1 /*
2  * Copyright 2020 Google LLC
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     https://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 // Generated by the protocol buffer compiler.  DO NOT EDIT!
17 // source: google/cloud/dialogflow/v2/audio_config.proto
18 
19 package com.google.cloud.dialogflow.v2;
20 
21 /**
22  *
23  *
24  * <pre>
25  * Instructs the speech recognizer how to process the audio content.
26  * </pre>
27  *
28  * Protobuf type {@code google.cloud.dialogflow.v2.InputAudioConfig}
29  */
30 public final class InputAudioConfig extends com.google.protobuf.GeneratedMessageV3
31     implements
32     // @@protoc_insertion_point(message_implements:google.cloud.dialogflow.v2.InputAudioConfig)
33     InputAudioConfigOrBuilder {
34   private static final long serialVersionUID = 0L;
35   // Use InputAudioConfig.newBuilder() to construct.
36   private InputAudioConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
37     super(builder);
38   }
39 
40   private InputAudioConfig() {
41     audioEncoding_ = 0;
42     languageCode_ = "";
43     phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
44     speechContexts_ = java.util.Collections.emptyList();
45     model_ = "";
46     modelVariant_ = 0;
47   }
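  /*
   * Illustrative usage sketch (not part of the generated API surface): the no-arg
   * constructor above only establishes the proto3 defaults; callers populate the
   * message through the nested Builder. The AUDIO_ENCODING_LINEAR_16 value is assumed
   * from the AudioEncoding enum referenced by this class.
   *
   * <pre>{@code
   * InputAudioConfig config =
   *     InputAudioConfig.newBuilder()
   *         .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
   *         .setSampleRateHertz(16000)
   *         .setLanguageCode("en-US")
   *         .setEnableAutomaticPunctuation(true)
   *         .build();
   * }</pre>
   */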
48 
49   @java.lang.Override
50   @SuppressWarnings({"unused"})
51   protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
52     return new InputAudioConfig();
53   }
54 
55   @java.lang.Override
56   public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
57     return this.unknownFields;
58   }
59 
60   public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
61     return com.google.cloud.dialogflow.v2.AudioConfigProto
62         .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_descriptor;
63   }
64 
65   @java.lang.Override
66   protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
67       internalGetFieldAccessorTable() {
68     return com.google.cloud.dialogflow.v2.AudioConfigProto
69         .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_fieldAccessorTable
70         .ensureFieldAccessorsInitialized(
71             com.google.cloud.dialogflow.v2.InputAudioConfig.class,
72             com.google.cloud.dialogflow.v2.InputAudioConfig.Builder.class);
73   }
74 
75   public static final int AUDIO_ENCODING_FIELD_NUMBER = 1;
76   private int audioEncoding_ = 0;
77   /**
78    *
79    *
80    * <pre>
81    * Required. Audio encoding of the audio content to process.
82    * </pre>
83    *
84    * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
85    *
86    * @return The enum numeric value on the wire for audioEncoding.
87    */
88   @java.lang.Override
89   public int getAudioEncodingValue() {
90     return audioEncoding_;
91   }
92   /**
93    *
94    *
95    * <pre>
96    * Required. Audio encoding of the audio content to process.
97    * </pre>
98    *
99    * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
100    *
101    * @return The audioEncoding.
102    */
103   @java.lang.Override
104   public com.google.cloud.dialogflow.v2.AudioEncoding getAudioEncoding() {
105     com.google.cloud.dialogflow.v2.AudioEncoding result =
106         com.google.cloud.dialogflow.v2.AudioEncoding.forNumber(audioEncoding_);
107     return result == null ? com.google.cloud.dialogflow.v2.AudioEncoding.UNRECOGNIZED : result;
108   }
109 
110   public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2;
111   private int sampleRateHertz_ = 0;
112   /**
113    *
114    *
115    * <pre>
116    * Required. Sample rate (in Hertz) of the audio content sent in the query.
117    * Refer to
118    * [Cloud Speech API
119    * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
120    * more details.
121    * </pre>
122    *
123    * <code>int32 sample_rate_hertz = 2;</code>
124    *
125    * @return The sampleRateHertz.
126    */
127   @java.lang.Override
128   public int getSampleRateHertz() {
129     return sampleRateHertz_;
130   }
131 
132   public static final int LANGUAGE_CODE_FIELD_NUMBER = 3;
133 
134   @SuppressWarnings("serial")
135   private volatile java.lang.Object languageCode_ = "";
136   /**
137    *
138    *
139    * <pre>
140    * Required. The language of the supplied audio. Dialogflow does not do
141    * translations. See [Language
142    * Support](https://cloud.google.com/dialogflow/docs/reference/language)
143    * for a list of the currently supported language codes. Note that queries in
144    * the same session do not necessarily need to specify the same language.
145    * </pre>
146    *
147    * <code>string language_code = 3;</code>
148    *
149    * @return The languageCode.
150    */
151   @java.lang.Override
152   public java.lang.String getLanguageCode() {
153     java.lang.Object ref = languageCode_;
154     if (ref instanceof java.lang.String) {
155       return (java.lang.String) ref;
156     } else {
157       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
158       java.lang.String s = bs.toStringUtf8();
159       languageCode_ = s;
160       return s;
161     }
162   }
163   /**
164    *
165    *
166    * <pre>
167    * Required. The language of the supplied audio. Dialogflow does not do
168    * translations. See [Language
169    * Support](https://cloud.google.com/dialogflow/docs/reference/language)
170    * for a list of the currently supported language codes. Note that queries in
171    * the same session do not necessarily need to specify the same language.
172    * </pre>
173    *
174    * <code>string language_code = 3;</code>
175    *
176    * @return The bytes for languageCode.
177    */
178   @java.lang.Override
179   public com.google.protobuf.ByteString getLanguageCodeBytes() {
180     java.lang.Object ref = languageCode_;
181     if (ref instanceof java.lang.String) {
182       com.google.protobuf.ByteString b =
183           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
184       languageCode_ = b;
185       return b;
186     } else {
187       return (com.google.protobuf.ByteString) ref;
188     }
189   }
190 
191   public static final int ENABLE_WORD_INFO_FIELD_NUMBER = 13;
192   private boolean enableWordInfo_ = false;
193   /**
194    *
195    *
196    * <pre>
197    * If `true`, Dialogflow returns
198    * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
199    * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
200    * with information about the recognized speech words, e.g. start and end time
201    * offsets. If false or unspecified, Speech doesn't return any word-level
202    * information.
203    * </pre>
204    *
205    * <code>bool enable_word_info = 13;</code>
206    *
207    * @return The enableWordInfo.
208    */
209   @java.lang.Override
210   public boolean getEnableWordInfo() {
211     return enableWordInfo_;
212   }
213 
214   public static final int PHRASE_HINTS_FIELD_NUMBER = 4;
215 
216   @SuppressWarnings("serial")
217   private com.google.protobuf.LazyStringList phraseHints_;
218   /**
219    *
220    *
221    * <pre>
222    * A list of strings containing words and phrases that the speech
223    * recognizer should recognize with higher likelihood.
224    * See [the Cloud Speech
225    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
226    * for more details.
227    * This field is deprecated. Please use [speech_contexts]() instead. If you
228    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
229    * treat the [phrase_hints]() as a single additional [SpeechContext]().
230    * </pre>
231    *
232    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
233    *
234    * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
235    *     google/cloud/dialogflow/v2/audio_config.proto;l=223
236    * @return A list containing the phraseHints.
237    */
238   @java.lang.Deprecated
239   public com.google.protobuf.ProtocolStringList getPhraseHintsList() {
240     return phraseHints_;
241   }
242   /**
243    *
244    *
245    * <pre>
246    * A list of strings containing words and phrases that the speech
247    * recognizer should recognize with higher likelihood.
248    * See [the Cloud Speech
249    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
250    * for more details.
251    * This field is deprecated. Please use [speech_contexts]() instead. If you
252    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
253    * treat the [phrase_hints]() as a single additional [SpeechContext]().
254    * </pre>
255    *
256    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
257    *
258    * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
259    *     google/cloud/dialogflow/v2/audio_config.proto;l=223
260    * @return The count of phraseHints.
261    */
262   @java.lang.Deprecated
263   public int getPhraseHintsCount() {
264     return phraseHints_.size();
265   }
266   /**
267    *
268    *
269    * <pre>
270    * A list of strings containing words and phrases that the speech
271    * recognizer should recognize with higher likelihood.
272    * See [the Cloud Speech
273    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
274    * for more details.
275    * This field is deprecated. Please use [speech_contexts]() instead. If you
276    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
277    * treat the [phrase_hints]() as a single additional [SpeechContext]().
278    * </pre>
279    *
280    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
281    *
282    * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
283    *     google/cloud/dialogflow/v2/audio_config.proto;l=223
284    * @param index The index of the element to return.
285    * @return The phraseHints at the given index.
286    */
287   @java.lang.Deprecated
288   public java.lang.String getPhraseHints(int index) {
289     return phraseHints_.get(index);
290   }
291   /**
292    *
293    *
294    * <pre>
295    * A list of strings containing words and phrases that the speech
296    * recognizer should recognize with higher likelihood.
297    * See [the Cloud Speech
298    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
299    * for more details.
300    * This field is deprecated. Please use [speech_contexts]() instead. If you
301    * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
302    * treat the [phrase_hints]() as a single additional [SpeechContext]().
303    * </pre>
304    *
305    * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
306    *
307    * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
308    *     google/cloud/dialogflow/v2/audio_config.proto;l=223
309    * @param index The index of the value to return.
310    * @return The bytes of the phraseHints at the given index.
311    */
312   @java.lang.Deprecated
313   public com.google.protobuf.ByteString getPhraseHintsBytes(int index) {
314     return phraseHints_.getByteString(index);
315   }
316 
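  /*
   * Migration sketch (illustrative), since phrase_hints is deprecated in favour of
   * speech_contexts. SpeechContext.newBuilder().addPhrases(...) and the repeated-field
   * adder addSpeechContexts(...) on this class's Builder are assumed here as the
   * standard protobuf-generated accessors for those fields; they are not shown in this
   * excerpt.
   *
   * <pre>{@code
   * SpeechContext billing =
   *     SpeechContext.newBuilder()
   *         .addPhrases("account balance")
   *         .addPhrases("routing number")
   *         .build();
   * InputAudioConfig config =
   *     InputAudioConfig.newBuilder()
   *         .addSpeechContexts(billing)
   *         .build();
   * }</pre>
   */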
317   public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 11;
318 
319   @SuppressWarnings("serial")
320   private java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> speechContexts_;
321   /**
322    *
323    *
324    * <pre>
325    * Context information to assist speech recognition.
326    * See [the Cloud Speech
327    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
328    * for more details.
329    * </pre>
330    *
331    * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
332    */
333   @java.lang.Override
334   public java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> getSpeechContextsList() {
335     return speechContexts_;
336   }
337   /**
338    *
339    *
340    * <pre>
341    * Context information to assist speech recognition.
342    * See [the Cloud Speech
343    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
344    * for more details.
345    * </pre>
346    *
347    * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
348    */
349   @java.lang.Override
350   public java.util.List<? extends com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
351       getSpeechContextsOrBuilderList() {
352     return speechContexts_;
353   }
354   /**
355    *
356    *
357    * <pre>
358    * Context information to assist speech recognition.
359    * See [the Cloud Speech
360    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
361    * for more details.
362    * </pre>
363    *
364    * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
365    */
366   @java.lang.Override
367   public int getSpeechContextsCount() {
368     return speechContexts_.size();
369   }
370   /**
371    *
372    *
373    * <pre>
374    * Context information to assist speech recognition.
375    * See [the Cloud Speech
376    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
377    * for more details.
378    * </pre>
379    *
380    * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
381    */
382   @java.lang.Override
383   public com.google.cloud.dialogflow.v2.SpeechContext getSpeechContexts(int index) {
384     return speechContexts_.get(index);
385   }
386   /**
387    *
388    *
389    * <pre>
390    * Context information to assist speech recognition.
391    * See [the Cloud Speech
392    * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
393    * for more details.
394    * </pre>
395    *
396    * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
397    */
398   @java.lang.Override
399   public com.google.cloud.dialogflow.v2.SpeechContextOrBuilder getSpeechContextsOrBuilder(
400       int index) {
401     return speechContexts_.get(index);
402   }
403 
404   public static final int MODEL_FIELD_NUMBER = 7;
405 
406   @SuppressWarnings("serial")
407   private volatile java.lang.Object model_ = "";
408   /**
409    *
410    *
411    * <pre>
412    * Which Speech model to select for the given request. Select the
413    * model best suited to your domain to get best results. If a model is not
414    * explicitly specified, then we auto-select a model based on the parameters
415    * in the InputAudioConfig.
416    * If enhanced speech model is enabled for the agent and an enhanced
417    * version of the specified model for the language does not exist, then the
418    * speech is recognized using the standard version of the specified model.
419    * Refer to
420    * [Cloud Speech API
421    * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
422    * for more details.
423    * If you specify a model, the following models typically have the best
424    * performance:
425    * - phone_call (best for Agent Assist and telephony)
426    * - latest_short (best for Dialogflow non-telephony)
427    * - command_and_search (best for very short utterances and commands)
428    * </pre>
429    *
430    * <code>string model = 7;</code>
431    *
432    * @return The model.
433    */
434   @java.lang.Override
435   public java.lang.String getModel() {
436     java.lang.Object ref = model_;
437     if (ref instanceof java.lang.String) {
438       return (java.lang.String) ref;
439     } else {
440       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
441       java.lang.String s = bs.toStringUtf8();
442       model_ = s;
443       return s;
444     }
445   }
446   /**
447    *
448    *
449    * <pre>
450    * Which Speech model to select for the given request. Select the
451    * model best suited to your domain to get best results. If a model is not
452    * explicitly specified, then we auto-select a model based on the parameters
453    * in the InputAudioConfig.
454    * If enhanced speech model is enabled for the agent and an enhanced
455    * version of the specified model for the language does not exist, then the
456    * speech is recognized using the standard version of the specified model.
457    * Refer to
458    * [Cloud Speech API
459    * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
460    * for more details.
461    * If you specify a model, the following models typically have the best
462    * performance:
463    * - phone_call (best for Agent Assist and telephony)
464    * - latest_short (best for Dialogflow non-telephony)
465    * - command_and_search (best for very short utterances and commands)
466    * </pre>
467    *
468    * <code>string model = 7;</code>
469    *
470    * @return The bytes for model.
471    */
472   @java.lang.Override
473   public com.google.protobuf.ByteString getModelBytes() {
474     java.lang.Object ref = model_;
475     if (ref instanceof java.lang.String) {
476       com.google.protobuf.ByteString b =
477           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
478       model_ = b;
479       return b;
480     } else {
481       return (com.google.protobuf.ByteString) ref;
482     }
483   }
484 
485   public static final int MODEL_VARIANT_FIELD_NUMBER = 10;
486   private int modelVariant_ = 0;
487   /**
488    *
489    *
490    * <pre>
491    * Which variant of the [Speech
492    * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
493    * </pre>
494    *
495    * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
496    *
497    * @return The enum numeric value on the wire for modelVariant.
498    */
499   @java.lang.Override
500   public int getModelVariantValue() {
501     return modelVariant_;
502   }
503   /**
504    *
505    *
506    * <pre>
507    * Which variant of the [Speech
508    * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
509    * </pre>
510    *
511    * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
512    *
513    * @return The modelVariant.
514    */
515   @java.lang.Override
516   public com.google.cloud.dialogflow.v2.SpeechModelVariant getModelVariant() {
517     com.google.cloud.dialogflow.v2.SpeechModelVariant result =
518         com.google.cloud.dialogflow.v2.SpeechModelVariant.forNumber(modelVariant_);
519     return result == null ? com.google.cloud.dialogflow.v2.SpeechModelVariant.UNRECOGNIZED : result;
520   }
521 
522   public static final int SINGLE_UTTERANCE_FIELD_NUMBER = 8;
523   private boolean singleUtterance_ = false;
524   /**
525    *
526    *
527    * <pre>
528    * If `false` (default), recognition does not cease until the
529    * client closes the stream.
530    * If `true`, the recognizer will detect a single spoken utterance in input
531    * audio. Recognition ceases when it detects the audio's voice has
532    * stopped or paused. In this case, once a detected intent is received, the
533    * client should close the stream and start a new request with a new stream as
534    * needed.
535    * Note: This setting is relevant only for streaming methods.
536    * Note: When specified, InputAudioConfig.single_utterance takes precedence
537    * over StreamingDetectIntentRequest.single_utterance.
538    * </pre>
539    *
540    * <code>bool single_utterance = 8;</code>
541    *
542    * @return The singleUtterance.
543    */
544   @java.lang.Override
545   public boolean getSingleUtterance() {
546     return singleUtterance_;
547   }
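  /*
   * Streaming sketch (illustrative): per the Javadoc above, single_utterance set on
   * this config takes precedence over the request-level flag in streaming calls. A
   * streaming variant of an existing config can be derived via toBuilder():
   *
   * <pre>{@code
   * InputAudioConfig streamingConfig =
   *     config.toBuilder().setSingleUtterance(true).build();
   * }</pre>
   */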
548 
549   public static final int DISABLE_NO_SPEECH_RECOGNIZED_EVENT_FIELD_NUMBER = 14;
550   private boolean disableNoSpeechRecognizedEvent_ = false;
551   /**
552    *
553    *
554    * <pre>
555    * Only used in
556    * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
557    * and
558    * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
559    * If `false` and recognition doesn't return any result, trigger
560    * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
561    * </pre>
562    *
563    * <code>bool disable_no_speech_recognized_event = 14;</code>
564    *
565    * @return The disableNoSpeechRecognizedEvent.
566    */
567   @java.lang.Override
568   public boolean getDisableNoSpeechRecognizedEvent() {
569     return disableNoSpeechRecognizedEvent_;
570   }
571 
572   public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 17;
573   private boolean enableAutomaticPunctuation_ = false;
574   /**
575    *
576    *
577    * <pre>
578    * Enable automatic punctuation option at the speech backend.
579    * </pre>
580    *
581    * <code>bool enable_automatic_punctuation = 17;</code>
582    *
583    * @return The enableAutomaticPunctuation.
584    */
585   @java.lang.Override
586   public boolean getEnableAutomaticPunctuation() {
587     return enableAutomaticPunctuation_;
588   }
589 
590   private byte memoizedIsInitialized = -1;
591 
592   @java.lang.Override
593   public final boolean isInitialized() {
594     byte isInitialized = memoizedIsInitialized;
595     if (isInitialized == 1) return true;
596     if (isInitialized == 0) return false;
597 
598     memoizedIsInitialized = 1;
599     return true;
600   }
601 
602   @java.lang.Override
603   public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
604     if (audioEncoding_
605         != com.google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) {
606       output.writeEnum(1, audioEncoding_);
607     }
608     if (sampleRateHertz_ != 0) {
609       output.writeInt32(2, sampleRateHertz_);
610     }
611     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
612       com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_);
613     }
614     for (int i = 0; i < phraseHints_.size(); i++) {
615       com.google.protobuf.GeneratedMessageV3.writeString(output, 4, phraseHints_.getRaw(i));
616     }
617     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
618       com.google.protobuf.GeneratedMessageV3.writeString(output, 7, model_);
619     }
620     if (singleUtterance_ != false) {
621       output.writeBool(8, singleUtterance_);
622     }
623     if (modelVariant_
624         != com.google.cloud.dialogflow.v2.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED
625             .getNumber()) {
626       output.writeEnum(10, modelVariant_);
627     }
628     for (int i = 0; i < speechContexts_.size(); i++) {
629       output.writeMessage(11, speechContexts_.get(i));
630     }
631     if (enableWordInfo_ != false) {
632       output.writeBool(13, enableWordInfo_);
633     }
634     if (disableNoSpeechRecognizedEvent_ != false) {
635       output.writeBool(14, disableNoSpeechRecognizedEvent_);
636     }
637     if (enableAutomaticPunctuation_ != false) {
638       output.writeBool(17, enableAutomaticPunctuation_);
639     }
640     getUnknownFields().writeTo(output);
641   }
642 
643   @java.lang.Override
644   public int getSerializedSize() {
645     int size = memoizedSize;
646     if (size != -1) return size;
647 
648     size = 0;
649     if (audioEncoding_
650         != com.google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) {
651       size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, audioEncoding_);
652     }
653     if (sampleRateHertz_ != 0) {
654       size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_);
655     }
656     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
657       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_);
658     }
659     {
660       int dataSize = 0;
661       for (int i = 0; i < phraseHints_.size(); i++) {
662         dataSize += computeStringSizeNoTag(phraseHints_.getRaw(i));
663       }
664       size += dataSize;
665       size += 1 * getPhraseHintsList().size();
666     }
667     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
668       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, model_);
669     }
670     if (singleUtterance_ != false) {
671       size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, singleUtterance_);
672     }
673     if (modelVariant_
674         != com.google.cloud.dialogflow.v2.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED
675             .getNumber()) {
676       size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, modelVariant_);
677     }
678     for (int i = 0; i < speechContexts_.size(); i++) {
679       size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, speechContexts_.get(i));
680     }
681     if (enableWordInfo_ != false) {
682       size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, enableWordInfo_);
683     }
684     if (disableNoSpeechRecognizedEvent_ != false) {
685       size +=
686           com.google.protobuf.CodedOutputStream.computeBoolSize(
687               14, disableNoSpeechRecognizedEvent_);
688     }
689     if (enableAutomaticPunctuation_ != false) {
690       size +=
691           com.google.protobuf.CodedOutputStream.computeBoolSize(17, enableAutomaticPunctuation_);
692     }
693     size += getUnknownFields().getSerializedSize();
694     memoizedSize = size;
695     return size;
696   }
697 
698   @java.lang.Override
699   public boolean equals(final java.lang.Object obj) {
700     if (obj == this) {
701       return true;
702     }
703     if (!(obj instanceof com.google.cloud.dialogflow.v2.InputAudioConfig)) {
704       return super.equals(obj);
705     }
706     com.google.cloud.dialogflow.v2.InputAudioConfig other =
707         (com.google.cloud.dialogflow.v2.InputAudioConfig) obj;
708 
709     if (audioEncoding_ != other.audioEncoding_) return false;
710     if (getSampleRateHertz() != other.getSampleRateHertz()) return false;
711     if (!getLanguageCode().equals(other.getLanguageCode())) return false;
712     if (getEnableWordInfo() != other.getEnableWordInfo()) return false;
713     if (!getPhraseHintsList().equals(other.getPhraseHintsList())) return false;
714     if (!getSpeechContextsList().equals(other.getSpeechContextsList())) return false;
715     if (!getModel().equals(other.getModel())) return false;
716     if (modelVariant_ != other.modelVariant_) return false;
717     if (getSingleUtterance() != other.getSingleUtterance()) return false;
718     if (getDisableNoSpeechRecognizedEvent() != other.getDisableNoSpeechRecognizedEvent())
719       return false;
720     if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
721     if (!getUnknownFields().equals(other.getUnknownFields())) return false;
722     return true;
723   }
724 
725   @java.lang.Override
726   public int hashCode() {
727     if (memoizedHashCode != 0) {
728       return memoizedHashCode;
729     }
730     int hash = 41;
731     hash = (19 * hash) + getDescriptor().hashCode();
732     hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER;
733     hash = (53 * hash) + audioEncoding_;
734     hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
735     hash = (53 * hash) + getSampleRateHertz();
736     hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
737     hash = (53 * hash) + getLanguageCode().hashCode();
738     hash = (37 * hash) + ENABLE_WORD_INFO_FIELD_NUMBER;
739     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordInfo());
740     if (getPhraseHintsCount() > 0) {
741       hash = (37 * hash) + PHRASE_HINTS_FIELD_NUMBER;
742       hash = (53 * hash) + getPhraseHintsList().hashCode();
743     }
744     if (getSpeechContextsCount() > 0) {
745       hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
746       hash = (53 * hash) + getSpeechContextsList().hashCode();
747     }
748     hash = (37 * hash) + MODEL_FIELD_NUMBER;
749     hash = (53 * hash) + getModel().hashCode();
750     hash = (37 * hash) + MODEL_VARIANT_FIELD_NUMBER;
751     hash = (53 * hash) + modelVariant_;
752     hash = (37 * hash) + SINGLE_UTTERANCE_FIELD_NUMBER;
753     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSingleUtterance());
754     hash = (37 * hash) + DISABLE_NO_SPEECH_RECOGNIZED_EVENT_FIELD_NUMBER;
755     hash =
756         (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDisableNoSpeechRecognizedEvent());
757     hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
758     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
759     hash = (29 * hash) + getUnknownFields().hashCode();
760     memoizedHashCode = hash;
761     return hash;
762   }
763 
764   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(java.nio.ByteBuffer data)
765       throws com.google.protobuf.InvalidProtocolBufferException {
766     return PARSER.parseFrom(data);
767   }
768 
769   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
770       java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
771       throws com.google.protobuf.InvalidProtocolBufferException {
772     return PARSER.parseFrom(data, extensionRegistry);
773   }
774 
775   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
776       com.google.protobuf.ByteString data)
777       throws com.google.protobuf.InvalidProtocolBufferException {
778     return PARSER.parseFrom(data);
779   }
780 
781   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
782       com.google.protobuf.ByteString data,
783       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
784       throws com.google.protobuf.InvalidProtocolBufferException {
785     return PARSER.parseFrom(data, extensionRegistry);
786   }
787 
788   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(byte[] data)
789       throws com.google.protobuf.InvalidProtocolBufferException {
790     return PARSER.parseFrom(data);
791   }
792 
793   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
794       byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
795       throws com.google.protobuf.InvalidProtocolBufferException {
796     return PARSER.parseFrom(data, extensionRegistry);
797   }
798 
799   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(java.io.InputStream input)
800       throws java.io.IOException {
801     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
802   }
803 
804   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
805       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
806       throws java.io.IOException {
807     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
808         PARSER, input, extensionRegistry);
809   }
810 
811   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseDelimitedFrom(
812       java.io.InputStream input) throws java.io.IOException {
813     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
814   }
815 
816   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseDelimitedFrom(
817       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
818       throws java.io.IOException {
819     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
820         PARSER, input, extensionRegistry);
821   }
822 
823   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
824       com.google.protobuf.CodedInputStream input) throws java.io.IOException {
825     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
826   }
827 
828   public static com.google.cloud.dialogflow.v2.InputAudioConfig parseFrom(
829       com.google.protobuf.CodedInputStream input,
830       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
831       throws java.io.IOException {
832     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
833         PARSER, input, extensionRegistry);
834   }
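  /*
   * Round-trip sketch (illustrative): the parseFrom overloads above pair with the
   * serialization methods inherited from the generated message base class, e.g.
   * toByteArray(). Since equals(Object) above compares field-by-field, a parsed copy
   * compares equal to the original.
   *
   * <pre>{@code
   * byte[] wire = config.toByteArray();
   * InputAudioConfig parsed = InputAudioConfig.parseFrom(wire);
   * assert parsed.equals(config);
   * }</pre>
   */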
835 
836   @java.lang.Override
837   public Builder newBuilderForType() {
838     return newBuilder();
839   }
840 
841   public static Builder newBuilder() {
842     return DEFAULT_INSTANCE.toBuilder();
843   }
844 
845   public static Builder newBuilder(com.google.cloud.dialogflow.v2.InputAudioConfig prototype) {
846     return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
847   }
848 
849   @java.lang.Override
850   public Builder toBuilder() {
851     return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
852   }
853 
854   @java.lang.Override
855   protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
856     Builder builder = new Builder(parent);
857     return builder;
858   }
859   /**
860    *
861    *
862    * <pre>
863    * Instructs the speech recognizer how to process the audio content.
864    * </pre>
865    *
866    * Protobuf type {@code google.cloud.dialogflow.v2.InputAudioConfig}
867    */
868   public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
869       implements
870       // @@protoc_insertion_point(builder_implements:google.cloud.dialogflow.v2.InputAudioConfig)
871       com.google.cloud.dialogflow.v2.InputAudioConfigOrBuilder {
872     public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
873       return com.google.cloud.dialogflow.v2.AudioConfigProto
874           .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_descriptor;
875     }
876 
877     @java.lang.Override
878     protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
879         internalGetFieldAccessorTable() {
880       return com.google.cloud.dialogflow.v2.AudioConfigProto
881           .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_fieldAccessorTable
882           .ensureFieldAccessorsInitialized(
883               com.google.cloud.dialogflow.v2.InputAudioConfig.class,
884               com.google.cloud.dialogflow.v2.InputAudioConfig.Builder.class);
885     }
886 
887     // Construct using com.google.cloud.dialogflow.v2.InputAudioConfig.newBuilder()
888     private Builder() {}
889 
890     private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
891       super(parent);
892     }
893 
894     @java.lang.Override
895     public Builder clear() {
896       super.clear();
897       bitField0_ = 0;
898       audioEncoding_ = 0;
899       sampleRateHertz_ = 0;
900       languageCode_ = "";
901       enableWordInfo_ = false;
902       phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
903       bitField0_ = (bitField0_ & ~0x00000010);
904       if (speechContextsBuilder_ == null) {
905         speechContexts_ = java.util.Collections.emptyList();
906       } else {
907         speechContexts_ = null;
908         speechContextsBuilder_.clear();
909       }
910       bitField0_ = (bitField0_ & ~0x00000020);
911       model_ = "";
912       modelVariant_ = 0;
913       singleUtterance_ = false;
914       disableNoSpeechRecognizedEvent_ = false;
915       enableAutomaticPunctuation_ = false;
916       return this;
917     }
918 
919     @java.lang.Override
920     public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
921       return com.google.cloud.dialogflow.v2.AudioConfigProto
922           .internal_static_google_cloud_dialogflow_v2_InputAudioConfig_descriptor;
923     }
924 
925     @java.lang.Override
926     public com.google.cloud.dialogflow.v2.InputAudioConfig getDefaultInstanceForType() {
927       return com.google.cloud.dialogflow.v2.InputAudioConfig.getDefaultInstance();
928     }
929 
930     @java.lang.Override
931     public com.google.cloud.dialogflow.v2.InputAudioConfig build() {
932       com.google.cloud.dialogflow.v2.InputAudioConfig result = buildPartial();
933       if (!result.isInitialized()) {
934         throw newUninitializedMessageException(result);
935       }
936       return result;
937     }
938 
939     @java.lang.Override
940     public com.google.cloud.dialogflow.v2.InputAudioConfig buildPartial() {
941       com.google.cloud.dialogflow.v2.InputAudioConfig result =
942           new com.google.cloud.dialogflow.v2.InputAudioConfig(this);
943       buildPartialRepeatedFields(result);
944       if (bitField0_ != 0) {
945         buildPartial0(result);
946       }
947       onBuilt();
948       return result;
949     }
950 
951     private void buildPartialRepeatedFields(
952         com.google.cloud.dialogflow.v2.InputAudioConfig result) {
953       if (((bitField0_ & 0x00000010) != 0)) {
954         phraseHints_ = phraseHints_.getUnmodifiableView();
955         bitField0_ = (bitField0_ & ~0x00000010);
956       }
957       result.phraseHints_ = phraseHints_;
958       if (speechContextsBuilder_ == null) {
959         if (((bitField0_ & 0x00000020) != 0)) {
960           speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
961           bitField0_ = (bitField0_ & ~0x00000020);
962         }
963         result.speechContexts_ = speechContexts_;
964       } else {
965         result.speechContexts_ = speechContextsBuilder_.build();
966       }
967     }
968 
969     private void buildPartial0(com.google.cloud.dialogflow.v2.InputAudioConfig result) {
970       int from_bitField0_ = bitField0_;
971       if (((from_bitField0_ & 0x00000001) != 0)) {
972         result.audioEncoding_ = audioEncoding_;
973       }
974       if (((from_bitField0_ & 0x00000002) != 0)) {
975         result.sampleRateHertz_ = sampleRateHertz_;
976       }
977       if (((from_bitField0_ & 0x00000004) != 0)) {
978         result.languageCode_ = languageCode_;
979       }
980       if (((from_bitField0_ & 0x00000008) != 0)) {
981         result.enableWordInfo_ = enableWordInfo_;
982       }
983       if (((from_bitField0_ & 0x00000040) != 0)) {
984         result.model_ = model_;
985       }
986       if (((from_bitField0_ & 0x00000080) != 0)) {
987         result.modelVariant_ = modelVariant_;
988       }
989       if (((from_bitField0_ & 0x00000100) != 0)) {
990         result.singleUtterance_ = singleUtterance_;
991       }
992       if (((from_bitField0_ & 0x00000200) != 0)) {
993         result.disableNoSpeechRecognizedEvent_ = disableNoSpeechRecognizedEvent_;
994       }
995       if (((from_bitField0_ & 0x00000400) != 0)) {
996         result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
997       }
998     }
999 
1000     @java.lang.Override
1001     public Builder clone() {
1002       return super.clone();
1003     }
1004 
1005     @java.lang.Override
1006     public Builder setField(
1007         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1008       return super.setField(field, value);
1009     }
1010 
1011     @java.lang.Override
1012     public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
1013       return super.clearField(field);
1014     }
1015 
1016     @java.lang.Override
1017     public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
1018       return super.clearOneof(oneof);
1019     }
1020 
1021     @java.lang.Override
1022     public Builder setRepeatedField(
1023         com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
1024       return super.setRepeatedField(field, index, value);
1025     }
1026 
1027     @java.lang.Override
1028     public Builder addRepeatedField(
1029         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1030       return super.addRepeatedField(field, value);
1031     }
1032 
1033     @java.lang.Override
1034     public Builder mergeFrom(com.google.protobuf.Message other) {
1035       if (other instanceof com.google.cloud.dialogflow.v2.InputAudioConfig) {
1036         return mergeFrom((com.google.cloud.dialogflow.v2.InputAudioConfig) other);
1037       } else {
1038         super.mergeFrom(other);
1039         return this;
1040       }
1041     }
1042 
1043     public Builder mergeFrom(com.google.cloud.dialogflow.v2.InputAudioConfig other) {
1044       if (other == com.google.cloud.dialogflow.v2.InputAudioConfig.getDefaultInstance())
1045         return this;
1046       if (other.audioEncoding_ != 0) {
1047         setAudioEncodingValue(other.getAudioEncodingValue());
1048       }
1049       if (other.getSampleRateHertz() != 0) {
1050         setSampleRateHertz(other.getSampleRateHertz());
1051       }
1052       if (!other.getLanguageCode().isEmpty()) {
1053         languageCode_ = other.languageCode_;
1054         bitField0_ |= 0x00000004;
1055         onChanged();
1056       }
1057       if (other.getEnableWordInfo() != false) {
1058         setEnableWordInfo(other.getEnableWordInfo());
1059       }
1060       if (!other.phraseHints_.isEmpty()) {
1061         if (phraseHints_.isEmpty()) {
1062           phraseHints_ = other.phraseHints_;
1063           bitField0_ = (bitField0_ & ~0x00000010);
1064         } else {
1065           ensurePhraseHintsIsMutable();
1066           phraseHints_.addAll(other.phraseHints_);
1067         }
1068         onChanged();
1069       }
1070       if (speechContextsBuilder_ == null) {
1071         if (!other.speechContexts_.isEmpty()) {
1072           if (speechContexts_.isEmpty()) {
1073             speechContexts_ = other.speechContexts_;
1074             bitField0_ = (bitField0_ & ~0x00000020);
1075           } else {
1076             ensureSpeechContextsIsMutable();
1077             speechContexts_.addAll(other.speechContexts_);
1078           }
1079           onChanged();
1080         }
1081       } else {
1082         if (!other.speechContexts_.isEmpty()) {
1083           if (speechContextsBuilder_.isEmpty()) {
1084             speechContextsBuilder_.dispose();
1085             speechContextsBuilder_ = null;
1086             speechContexts_ = other.speechContexts_;
1087             bitField0_ = (bitField0_ & ~0x00000020);
1088             speechContextsBuilder_ =
1089                 com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
1090                     ? getSpeechContextsFieldBuilder()
1091                     : null;
1092           } else {
1093             speechContextsBuilder_.addAllMessages(other.speechContexts_);
1094           }
1095         }
1096       }
1097       if (!other.getModel().isEmpty()) {
1098         model_ = other.model_;
1099         bitField0_ |= 0x00000040;
1100         onChanged();
1101       }
1102       if (other.modelVariant_ != 0) {
1103         setModelVariantValue(other.getModelVariantValue());
1104       }
1105       if (other.getSingleUtterance() != false) {
1106         setSingleUtterance(other.getSingleUtterance());
1107       }
1108       if (other.getDisableNoSpeechRecognizedEvent() != false) {
1109         setDisableNoSpeechRecognizedEvent(other.getDisableNoSpeechRecognizedEvent());
1110       }
1111       if (other.getEnableAutomaticPunctuation() != false) {
1112         setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation());
1113       }
1114       this.mergeUnknownFields(other.getUnknownFields());
1115       onChanged();
1116       return this;
1117     }
1118 
1119     @java.lang.Override
1120     public final boolean isInitialized() {
1121       return true;
1122     }
1123 
1124     @java.lang.Override
1125     public Builder mergeFrom(
1126         com.google.protobuf.CodedInputStream input,
1127         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1128         throws java.io.IOException {
1129       if (extensionRegistry == null) {
1130         throw new java.lang.NullPointerException();
1131       }
1132       try {
1133         boolean done = false;
1134         while (!done) {
1135           int tag = input.readTag();
1136           switch (tag) {
1137             case 0:
1138               done = true;
1139               break;
1140             case 8:
1141               {
1142                 audioEncoding_ = input.readEnum();
1143                 bitField0_ |= 0x00000001;
1144                 break;
1145               } // case 8
1146             case 16:
1147               {
1148                 sampleRateHertz_ = input.readInt32();
1149                 bitField0_ |= 0x00000002;
1150                 break;
1151               } // case 16
1152             case 26:
1153               {
1154                 languageCode_ = input.readStringRequireUtf8();
1155                 bitField0_ |= 0x00000004;
1156                 break;
1157               } // case 26
1158             case 34:
1159               {
1160                 java.lang.String s = input.readStringRequireUtf8();
1161                 ensurePhraseHintsIsMutable();
1162                 phraseHints_.add(s);
1163                 break;
1164               } // case 34
1165             case 58:
1166               {
1167                 model_ = input.readStringRequireUtf8();
1168                 bitField0_ |= 0x00000040;
1169                 break;
1170               } // case 58
1171             case 64:
1172               {
1173                 singleUtterance_ = input.readBool();
1174                 bitField0_ |= 0x00000100;
1175                 break;
1176               } // case 64
1177             case 80:
1178               {
1179                 modelVariant_ = input.readEnum();
1180                 bitField0_ |= 0x00000080;
1181                 break;
1182               } // case 80
1183             case 90:
1184               {
1185                 com.google.cloud.dialogflow.v2.SpeechContext m =
1186                     input.readMessage(
1187                         com.google.cloud.dialogflow.v2.SpeechContext.parser(), extensionRegistry);
1188                 if (speechContextsBuilder_ == null) {
1189                   ensureSpeechContextsIsMutable();
1190                   speechContexts_.add(m);
1191                 } else {
1192                   speechContextsBuilder_.addMessage(m);
1193                 }
1194                 break;
1195               } // case 90
1196             case 104:
1197               {
1198                 enableWordInfo_ = input.readBool();
1199                 bitField0_ |= 0x00000008;
1200                 break;
1201               } // case 104
1202             case 112:
1203               {
1204                 disableNoSpeechRecognizedEvent_ = input.readBool();
1205                 bitField0_ |= 0x00000200;
1206                 break;
1207               } // case 112
1208             case 136:
1209               {
1210                 enableAutomaticPunctuation_ = input.readBool();
1211                 bitField0_ |= 0x00000400;
1212                 break;
1213               } // case 136
1214             default:
1215               {
1216                 if (!super.parseUnknownField(input, extensionRegistry, tag)) {
1217                   done = true; // was an endgroup tag
1218                 }
1219                 break;
1220               } // default:
1221           } // switch (tag)
1222         } // while (!done)
1223       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1224         throw e.unwrapIOException();
1225       } finally {
1226         onChanged();
1227       } // finally
1228       return this;
1229     }
1230 
1231     private int bitField0_;
1232 
1233     private int audioEncoding_ = 0;
1234     /**
1235      *
1236      *
1237      * <pre>
1238      * Required. Audio encoding of the audio content to process.
1239      * </pre>
1240      *
1241      * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
1242      *
1243      * @return The enum numeric value on the wire for audioEncoding.
1244      */
1245     @java.lang.Override
1246     public int getAudioEncodingValue() {
1247       return audioEncoding_;
1248     }
1249     /**
1250      *
1251      *
1252      * <pre>
1253      * Required. Audio encoding of the audio content to process.
1254      * </pre>
1255      *
1256      * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
1257      *
1258      * @param value The enum numeric value on the wire for audioEncoding to set.
1259      * @return This builder for chaining.
1260      */
1261     public Builder setAudioEncodingValue(int value) {
1262       audioEncoding_ = value;
1263       bitField0_ |= 0x00000001;
1264       onChanged();
1265       return this;
1266     }
1267     /**
1268      *
1269      *
1270      * <pre>
1271      * Required. Audio encoding of the audio content to process.
1272      * </pre>
1273      *
1274      * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
1275      *
1276      * @return The audioEncoding.
1277      */
1278     @java.lang.Override
1279     public com.google.cloud.dialogflow.v2.AudioEncoding getAudioEncoding() {
1280       com.google.cloud.dialogflow.v2.AudioEncoding result =
1281           com.google.cloud.dialogflow.v2.AudioEncoding.forNumber(audioEncoding_);
1282       return result == null ? com.google.cloud.dialogflow.v2.AudioEncoding.UNRECOGNIZED : result;
1283     }
1284     /**
1285      *
1286      *
1287      * <pre>
1288      * Required. Audio encoding of the audio content to process.
1289      * </pre>
1290      *
1291      * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
1292      *
1293      * @param value The audioEncoding to set.
1294      * @return This builder for chaining.
1295      */
1296     public Builder setAudioEncoding(com.google.cloud.dialogflow.v2.AudioEncoding value) {
1297       if (value == null) {
1298         throw new NullPointerException();
1299       }
1300       bitField0_ |= 0x00000001;
1301       audioEncoding_ = value.getNumber();
1302       onChanged();
1303       return this;
1304     }
1305     /**
1306      *
1307      *
1308      * <pre>
1309      * Required. Audio encoding of the audio content to process.
1310      * </pre>
1311      *
1312      * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 1;</code>
1313      *
1314      * @return This builder for chaining.
1315      */
1316     public Builder clearAudioEncoding() {
1317       bitField0_ = (bitField0_ & ~0x00000001);
1318       audioEncoding_ = 0;
1319       onChanged();
1320       return this;
1321     }
1322 
1323     private int sampleRateHertz_;
1324     /**
1325      *
1326      *
1327      * <pre>
1328      * Required. Sample rate (in Hertz) of the audio content sent in the query.
1329      * Refer to
1330      * [Cloud Speech API
1331      * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
1332      * more details.
1333      * </pre>
1334      *
1335      * <code>int32 sample_rate_hertz = 2;</code>
1336      *
1337      * @return The sampleRateHertz.
1338      */
1339     @java.lang.Override
1340     public int getSampleRateHertz() {
1341       return sampleRateHertz_;
1342     }
1343     /**
1344      *
1345      *
1346      * <pre>
1347      * Required. Sample rate (in Hertz) of the audio content sent in the query.
1348      * Refer to
1349      * [Cloud Speech API
1350      * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
1351      * more details.
1352      * </pre>
1353      *
1354      * <code>int32 sample_rate_hertz = 2;</code>
1355      *
1356      * @param value The sampleRateHertz to set.
1357      * @return This builder for chaining.
1358      */
1359     public Builder setSampleRateHertz(int value) {
1360 
1361       sampleRateHertz_ = value;
1362       bitField0_ |= 0x00000002;
1363       onChanged();
1364       return this;
1365     }
1366     /**
1367      *
1368      *
1369      * <pre>
1370      * Required. Sample rate (in Hertz) of the audio content sent in the query.
1371      * Refer to
1372      * [Cloud Speech API
1373      * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
1374      * more details.
1375      * </pre>
1376      *
1377      * <code>int32 sample_rate_hertz = 2;</code>
1378      *
1379      * @return This builder for chaining.
1380      */
1381     public Builder clearSampleRateHertz() {
1382       bitField0_ = (bitField0_ & ~0x00000002);
1383       sampleRateHertz_ = 0;
1384       onChanged();
1385       return this;
1386     }
1387 
1388     private java.lang.Object languageCode_ = "";
1389     /**
1390      *
1391      *
1392      * <pre>
1393      * Required. The language of the supplied audio. Dialogflow does not do
1394      * translations. See [Language
1395      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1396      * for a list of the currently supported language codes. Note that queries in
1397      * the same session do not necessarily need to specify the same language.
1398      * </pre>
1399      *
1400      * <code>string language_code = 3;</code>
1401      *
1402      * @return The languageCode.
1403      */
1404     public java.lang.String getLanguageCode() {
1405       java.lang.Object ref = languageCode_;
1406       if (!(ref instanceof java.lang.String)) {
1407         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
1408         java.lang.String s = bs.toStringUtf8();
1409         languageCode_ = s;
1410         return s;
1411       } else {
1412         return (java.lang.String) ref;
1413       }
1414     }
1415     /**
1416      *
1417      *
1418      * <pre>
1419      * Required. The language of the supplied audio. Dialogflow does not do
1420      * translations. See [Language
1421      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1422      * for a list of the currently supported language codes. Note that queries in
1423      * the same session do not necessarily need to specify the same language.
1424      * </pre>
1425      *
1426      * <code>string language_code = 3;</code>
1427      *
1428      * @return The bytes for languageCode.
1429      */
1430     public com.google.protobuf.ByteString getLanguageCodeBytes() {
1431       java.lang.Object ref = languageCode_;
1432       if (ref instanceof String) {
1433         com.google.protobuf.ByteString b =
1434             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
1435         languageCode_ = b;
1436         return b;
1437       } else {
1438         return (com.google.protobuf.ByteString) ref;
1439       }
1440     }
1441     /**
1442      *
1443      *
1444      * <pre>
1445      * Required. The language of the supplied audio. Dialogflow does not do
1446      * translations. See [Language
1447      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1448      * for a list of the currently supported language codes. Note that queries in
1449      * the same session do not necessarily need to specify the same language.
1450      * </pre>
1451      *
1452      * <code>string language_code = 3;</code>
1453      *
1454      * @param value The languageCode to set.
1455      * @return This builder for chaining.
1456      */
1457     public Builder setLanguageCode(java.lang.String value) {
1458       if (value == null) {
1459         throw new NullPointerException();
1460       }
1461       languageCode_ = value;
1462       bitField0_ |= 0x00000004;
1463       onChanged();
1464       return this;
1465     }
1466     /**
1467      *
1468      *
1469      * <pre>
1470      * Required. The language of the supplied audio. Dialogflow does not do
1471      * translations. See [Language
1472      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1473      * for a list of the currently supported language codes. Note that queries in
1474      * the same session do not necessarily need to specify the same language.
1475      * </pre>
1476      *
1477      * <code>string language_code = 3;</code>
1478      *
1479      * @return This builder for chaining.
1480      */
1481     public Builder clearLanguageCode() {
1482       languageCode_ = getDefaultInstance().getLanguageCode();
1483       bitField0_ = (bitField0_ & ~0x00000004);
1484       onChanged();
1485       return this;
1486     }
1487     /**
1488      *
1489      *
1490      * <pre>
1491      * Required. The language of the supplied audio. Dialogflow does not do
1492      * translations. See [Language
1493      * Support](https://cloud.google.com/dialogflow/docs/reference/language)
1494      * for a list of the currently supported language codes. Note that queries in
1495      * the same session do not necessarily need to specify the same language.
1496      * </pre>
1497      *
1498      * <code>string language_code = 3;</code>
1499      *
1500      * @param value The bytes for languageCode to set.
1501      * @return This builder for chaining.
1502      */
1503     public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
1504       if (value == null) {
1505         throw new NullPointerException();
1506       }
1507       checkByteStringIsUtf8(value);
1508       languageCode_ = value;
1509       bitField0_ |= 0x00000004;
1510       onChanged();
1511       return this;
1512     }
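    // Sketch only: language_code takes a Dialogflow language tag such as "en-US"; see
    // the Language Support page linked above for the full list of supported codes.
    //
    //   builder.setLanguageCode("en-US");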
1513 
1514     private boolean enableWordInfo_;
1515     /**
1516      *
1517      *
1518      * <pre>
1519      * If `true`, Dialogflow returns
1520      * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
1521      * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
1522      * with information about the recognized speech words, e.g. start and end time
1523      * offsets. If false or unspecified, Speech doesn't return any word-level
1524      * information.
1525      * </pre>
1526      *
1527      * <code>bool enable_word_info = 13;</code>
1528      *
1529      * @return The enableWordInfo.
1530      */
1531     @java.lang.Override
1532     public boolean getEnableWordInfo() {
1533       return enableWordInfo_;
1534     }
1535     /**
1536      *
1537      *
1538      * <pre>
1539      * If `true`, Dialogflow returns
1540      * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
1541      * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
1542      * with information about the recognized speech words, e.g. start and end time
1543      * offsets. If false or unspecified, Speech doesn't return any word-level
1544      * information.
1545      * </pre>
1546      *
1547      * <code>bool enable_word_info = 13;</code>
1548      *
1549      * @param value The enableWordInfo to set.
1550      * @return This builder for chaining.
1551      */
1552     public Builder setEnableWordInfo(boolean value) {
1553 
1554       enableWordInfo_ = value;
1555       bitField0_ |= 0x00000008;
1556       onChanged();
1557       return this;
1558     }
1559     /**
1560      *
1561      *
1562      * <pre>
1563      * If `true`, Dialogflow returns
1564      * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
1565      * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
1566      * with information about the recognized speech words, e.g. start and end time
1567      * offsets. If false or unspecified, Speech doesn't return any word-level
1568      * information.
1569      * </pre>
1570      *
1571      * <code>bool enable_word_info = 13;</code>
1572      *
1573      * @return This builder for chaining.
1574      */
1575     public Builder clearEnableWordInfo() {
1576       bitField0_ = (bitField0_ & ~0x00000008);
1577       enableWordInfo_ = false;
1578       onChanged();
1579       return this;
1580     }
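    // Sketch only: word-level timing is opt-in and is surfaced on streaming results
    // as SpeechWordInfo entries.
    //
    //   builder.setEnableWordInfo(true);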
1581 
1582     private com.google.protobuf.LazyStringList phraseHints_ =
1583         com.google.protobuf.LazyStringArrayList.EMPTY;
1584 
1585     private void ensurePhraseHintsIsMutable() {
1586       if (!((bitField0_ & 0x00000010) != 0)) {
1587         phraseHints_ = new com.google.protobuf.LazyStringArrayList(phraseHints_);
1588         bitField0_ |= 0x00000010;
1589       }
1590     }
1591     /**
1592      *
1593      *
1594      * <pre>
1595      * A list of strings containing words and phrases that the speech
1596      * recognizer should recognize with higher likelihood.
1597      * See [the Cloud Speech
1598      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1599      * for more details.
1600      * This field is deprecated. Please use [speech_contexts]() instead. If you
1601      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1602      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1603      * </pre>
1604      *
1605      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1606      *
1607      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1608      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1609      * @return A list containing the phraseHints.
1610      */
1611     @java.lang.Deprecated
1612     public com.google.protobuf.ProtocolStringList getPhraseHintsList() {
1613       return phraseHints_.getUnmodifiableView();
1614     }
1615     /**
1616      *
1617      *
1618      * <pre>
1619      * A list of strings containing words and phrases that the speech
1620      * recognizer should recognize with higher likelihood.
1621      * See [the Cloud Speech
1622      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1623      * for more details.
1624      * This field is deprecated. Please use [speech_contexts]() instead. If you
1625      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1626      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1627      * </pre>
1628      *
1629      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1630      *
1631      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1632      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1633      * @return The count of phraseHints.
1634      */
1635     @java.lang.Deprecated
1636     public int getPhraseHintsCount() {
1637       return phraseHints_.size();
1638     }
1639     /**
1640      *
1641      *
1642      * <pre>
1643      * A list of strings containing words and phrases that the speech
1644      * recognizer should recognize with higher likelihood.
1645      * See [the Cloud Speech
1646      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1647      * for more details.
1648      * This field is deprecated. Please use [speech_contexts]() instead. If you
1649      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1650      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1651      * </pre>
1652      *
1653      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1654      *
1655      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1656      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1657      * @param index The index of the element to return.
1658      * @return The phraseHints at the given index.
1659      */
1660     @java.lang.Deprecated
1661     public java.lang.String getPhraseHints(int index) {
1662       return phraseHints_.get(index);
1663     }
1664     /**
1665      *
1666      *
1667      * <pre>
1668      * A list of strings containing words and phrases that the speech
1669      * recognizer should recognize with higher likelihood.
1670      * See [the Cloud Speech
1671      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1672      * for more details.
1673      * This field is deprecated. Please use [speech_contexts]() instead. If you
1674      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1675      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1676      * </pre>
1677      *
1678      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1679      *
1680      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1681      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1682      * @param index The index of the value to return.
1683      * @return The bytes of the phraseHints at the given index.
1684      */
1685     @java.lang.Deprecated
1686     public com.google.protobuf.ByteString getPhraseHintsBytes(int index) {
1687       return phraseHints_.getByteString(index);
1688     }
1689     /**
1690      *
1691      *
1692      * <pre>
1693      * A list of strings containing words and phrases that the speech
1694      * recognizer should recognize with higher likelihood.
1695      * See [the Cloud Speech
1696      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1697      * for more details.
1698      * This field is deprecated. Please use [speech_contexts]() instead. If you
1699      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1700      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1701      * </pre>
1702      *
1703      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1704      *
1705      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1706      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1707      * @param index The index to set the value at.
1708      * @param value The phraseHints to set.
1709      * @return This builder for chaining.
1710      */
1711     @java.lang.Deprecated
1712     public Builder setPhraseHints(int index, java.lang.String value) {
1713       if (value == null) {
1714         throw new NullPointerException();
1715       }
1716       ensurePhraseHintsIsMutable();
1717       phraseHints_.set(index, value);
1718       onChanged();
1719       return this;
1720     }
1721     /**
1722      *
1723      *
1724      * <pre>
1725      * A list of strings containing words and phrases that the speech
1726      * recognizer should recognize with higher likelihood.
1727      * See [the Cloud Speech
1728      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1729      * for more details.
1730      * This field is deprecated. Please use [speech_contexts]() instead. If you
1731      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1732      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1733      * </pre>
1734      *
1735      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1736      *
1737      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1738      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1739      * @param value The phraseHints to add.
1740      * @return This builder for chaining.
1741      */
1742     @java.lang.Deprecated
1743     public Builder addPhraseHints(java.lang.String value) {
1744       if (value == null) {
1745         throw new NullPointerException();
1746       }
1747       ensurePhraseHintsIsMutable();
1748       phraseHints_.add(value);
1749       onChanged();
1750       return this;
1751     }
1752     /**
1753      *
1754      *
1755      * <pre>
1756      * A list of strings containing words and phrases that the speech
1757      * recognizer should recognize with higher likelihood.
1758      * See [the Cloud Speech
1759      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1760      * for more details.
1761      * This field is deprecated. Please use [speech_contexts]() instead. If you
1762      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1763      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1764      * </pre>
1765      *
1766      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1767      *
1768      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1769      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1770      * @param values The phraseHints to add.
1771      * @return This builder for chaining.
1772      */
1773     @java.lang.Deprecated
1774     public Builder addAllPhraseHints(java.lang.Iterable<java.lang.String> values) {
1775       ensurePhraseHintsIsMutable();
1776       com.google.protobuf.AbstractMessageLite.Builder.addAll(values, phraseHints_);
1777       onChanged();
1778       return this;
1779     }
1780     /**
1781      *
1782      *
1783      * <pre>
1784      * A list of strings containing words and phrases that the speech
1785      * recognizer should recognize with higher likelihood.
1786      * See [the Cloud Speech
1787      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1788      * for more details.
1789      * This field is deprecated. Please use [speech_contexts]() instead. If you
1790      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1791      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1792      * </pre>
1793      *
1794      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1795      *
1796      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1797      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1798      * @return This builder for chaining.
1799      */
1800     @java.lang.Deprecated
1801     public Builder clearPhraseHints() {
1802       phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
1803       bitField0_ = (bitField0_ & ~0x00000010);
1804       onChanged();
1805       return this;
1806     }
1807     /**
1808      *
1809      *
1810      * <pre>
1811      * A list of strings containing words and phrases that the speech
1812      * recognizer should recognize with higher likelihood.
1813      * See [the Cloud Speech
1814      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1815      * for more details.
1816      * This field is deprecated. Please use [speech_contexts]() instead. If you
1817      * specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
1818      * treat the [phrase_hints]() as a single additional [SpeechContext]().
1819      * </pre>
1820      *
1821      * <code>repeated string phrase_hints = 4 [deprecated = true];</code>
1822      *
1823      * @deprecated google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints is deprecated. See
1824      *     google/cloud/dialogflow/v2/audio_config.proto;l=223
1825      * @param value The bytes of the phraseHints to add.
1826      * @return This builder for chaining.
1827      */
1828     @java.lang.Deprecated
1829     public Builder addPhraseHintsBytes(com.google.protobuf.ByteString value) {
1830       if (value == null) {
1831         throw new NullPointerException();
1832       }
1833       checkByteStringIsUtf8(value);
1834       ensurePhraseHintsIsMutable();
1835       phraseHints_.add(value);
1836       onChanged();
1837       return this;
1838     }
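    // Sketch only: phrase_hints is deprecated, so new code would normally express the
    // same hints through speech_contexts (see the methods below); if both are set, the
    // hints are folded into a single additional SpeechContext. The phrases here are
    // arbitrary examples.
    //
    //   builder.addAllPhraseHints(java.util.Arrays.asList("account balance", "wire transfer"));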
1839 
1840     private java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> speechContexts_ =
1841         java.util.Collections.emptyList();
1842 
1843     private void ensureSpeechContextsIsMutable() {
1844       if (!((bitField0_ & 0x00000020) != 0)) {
1845         speechContexts_ =
1846             new java.util.ArrayList<com.google.cloud.dialogflow.v2.SpeechContext>(speechContexts_);
1847         bitField0_ |= 0x00000020;
1848       }
1849     }
1850 
1851     private com.google.protobuf.RepeatedFieldBuilderV3<
1852             com.google.cloud.dialogflow.v2.SpeechContext,
1853             com.google.cloud.dialogflow.v2.SpeechContext.Builder,
1854             com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
1855         speechContextsBuilder_;
1856 
1857     /**
1858      *
1859      *
1860      * <pre>
1861      * Context information to assist speech recognition.
1862      * See [the Cloud Speech
1863      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1864      * for more details.
1865      * </pre>
1866      *
1867      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
1868      */
1869     public java.util.List<com.google.cloud.dialogflow.v2.SpeechContext> getSpeechContextsList() {
1870       if (speechContextsBuilder_ == null) {
1871         return java.util.Collections.unmodifiableList(speechContexts_);
1872       } else {
1873         return speechContextsBuilder_.getMessageList();
1874       }
1875     }
1876     /**
1877      *
1878      *
1879      * <pre>
1880      * Context information to assist speech recognition.
1881      * See [the Cloud Speech
1882      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1883      * for more details.
1884      * </pre>
1885      *
1886      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
1887      */
1888     public int getSpeechContextsCount() {
1889       if (speechContextsBuilder_ == null) {
1890         return speechContexts_.size();
1891       } else {
1892         return speechContextsBuilder_.getCount();
1893       }
1894     }
1895     /**
1896      *
1897      *
1898      * <pre>
1899      * Context information to assist speech recognition.
1900      * See [the Cloud Speech
1901      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1902      * for more details.
1903      * </pre>
1904      *
1905      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
1906      */
1907     public com.google.cloud.dialogflow.v2.SpeechContext getSpeechContexts(int index) {
1908       if (speechContextsBuilder_ == null) {
1909         return speechContexts_.get(index);
1910       } else {
1911         return speechContextsBuilder_.getMessage(index);
1912       }
1913     }
1914     /**
1915      *
1916      *
1917      * <pre>
1918      * Context information to assist speech recognition.
1919      * See [the Cloud Speech
1920      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1921      * for more details.
1922      * </pre>
1923      *
1924      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
1925      */
1926     public Builder setSpeechContexts(
1927         int index, com.google.cloud.dialogflow.v2.SpeechContext value) {
1928       if (speechContextsBuilder_ == null) {
1929         if (value == null) {
1930           throw new NullPointerException();
1931         }
1932         ensureSpeechContextsIsMutable();
1933         speechContexts_.set(index, value);
1934         onChanged();
1935       } else {
1936         speechContextsBuilder_.setMessage(index, value);
1937       }
1938       return this;
1939     }
1940     /**
1941      *
1942      *
1943      * <pre>
1944      * Context information to assist speech recognition.
1945      * See [the Cloud Speech
1946      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1947      * for more details.
1948      * </pre>
1949      *
1950      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
1951      */
1952     public Builder setSpeechContexts(
1953         int index, com.google.cloud.dialogflow.v2.SpeechContext.Builder builderForValue) {
1954       if (speechContextsBuilder_ == null) {
1955         ensureSpeechContextsIsMutable();
1956         speechContexts_.set(index, builderForValue.build());
1957         onChanged();
1958       } else {
1959         speechContextsBuilder_.setMessage(index, builderForValue.build());
1960       }
1961       return this;
1962     }
1963     /**
1964      *
1965      *
1966      * <pre>
1967      * Context information to assist speech recognition.
1968      * See [the Cloud Speech
1969      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1970      * for more details.
1971      * </pre>
1972      *
1973      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
1974      */
1975     public Builder addSpeechContexts(com.google.cloud.dialogflow.v2.SpeechContext value) {
1976       if (speechContextsBuilder_ == null) {
1977         if (value == null) {
1978           throw new NullPointerException();
1979         }
1980         ensureSpeechContextsIsMutable();
1981         speechContexts_.add(value);
1982         onChanged();
1983       } else {
1984         speechContextsBuilder_.addMessage(value);
1985       }
1986       return this;
1987     }
1988     /**
1989      *
1990      *
1991      * <pre>
1992      * Context information to assist speech recognition.
1993      * See [the Cloud Speech
1994      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
1995      * for more details.
1996      * </pre>
1997      *
1998      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
1999      */
2000     public Builder addSpeechContexts(
2001         int index, com.google.cloud.dialogflow.v2.SpeechContext value) {
2002       if (speechContextsBuilder_ == null) {
2003         if (value == null) {
2004           throw new NullPointerException();
2005         }
2006         ensureSpeechContextsIsMutable();
2007         speechContexts_.add(index, value);
2008         onChanged();
2009       } else {
2010         speechContextsBuilder_.addMessage(index, value);
2011       }
2012       return this;
2013     }
2014     /**
2015      *
2016      *
2017      * <pre>
2018      * Context information to assist speech recognition.
2019      * See [the Cloud Speech
2020      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2021      * for more details.
2022      * </pre>
2023      *
2024      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2025      */
2026     public Builder addSpeechContexts(
2027         com.google.cloud.dialogflow.v2.SpeechContext.Builder builderForValue) {
2028       if (speechContextsBuilder_ == null) {
2029         ensureSpeechContextsIsMutable();
2030         speechContexts_.add(builderForValue.build());
2031         onChanged();
2032       } else {
2033         speechContextsBuilder_.addMessage(builderForValue.build());
2034       }
2035       return this;
2036     }
2037     /**
2038      *
2039      *
2040      * <pre>
2041      * Context information to assist speech recognition.
2042      * See [the Cloud Speech
2043      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2044      * for more details.
2045      * </pre>
2046      *
2047      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2048      */
2049     public Builder addSpeechContexts(
2050         int index, com.google.cloud.dialogflow.v2.SpeechContext.Builder builderForValue) {
2051       if (speechContextsBuilder_ == null) {
2052         ensureSpeechContextsIsMutable();
2053         speechContexts_.add(index, builderForValue.build());
2054         onChanged();
2055       } else {
2056         speechContextsBuilder_.addMessage(index, builderForValue.build());
2057       }
2058       return this;
2059     }
2060     /**
2061      *
2062      *
2063      * <pre>
2064      * Context information to assist speech recognition.
2065      * See [the Cloud Speech
2066      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2067      * for more details.
2068      * </pre>
2069      *
2070      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2071      */
2072     public Builder addAllSpeechContexts(
2073         java.lang.Iterable<? extends com.google.cloud.dialogflow.v2.SpeechContext> values) {
2074       if (speechContextsBuilder_ == null) {
2075         ensureSpeechContextsIsMutable();
2076         com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_);
2077         onChanged();
2078       } else {
2079         speechContextsBuilder_.addAllMessages(values);
2080       }
2081       return this;
2082     }
2083     /**
2084      *
2085      *
2086      * <pre>
2087      * Context information to assist speech recognition.
2088      * See [the Cloud Speech
2089      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2090      * for more details.
2091      * </pre>
2092      *
2093      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2094      */
2095     public Builder clearSpeechContexts() {
2096       if (speechContextsBuilder_ == null) {
2097         speechContexts_ = java.util.Collections.emptyList();
2098         bitField0_ = (bitField0_ & ~0x00000020);
2099         onChanged();
2100       } else {
2101         speechContextsBuilder_.clear();
2102       }
2103       return this;
2104     }
2105     /**
2106      *
2107      *
2108      * <pre>
2109      * Context information to assist speech recognition.
2110      * See [the Cloud Speech
2111      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2112      * for more details.
2113      * </pre>
2114      *
2115      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2116      */
2117     public Builder removeSpeechContexts(int index) {
2118       if (speechContextsBuilder_ == null) {
2119         ensureSpeechContextsIsMutable();
2120         speechContexts_.remove(index);
2121         onChanged();
2122       } else {
2123         speechContextsBuilder_.remove(index);
2124       }
2125       return this;
2126     }
2127     /**
2128      *
2129      *
2130      * <pre>
2131      * Context information to assist speech recognition.
2132      * See [the Cloud Speech
2133      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2134      * for more details.
2135      * </pre>
2136      *
2137      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2138      */
2139     public com.google.cloud.dialogflow.v2.SpeechContext.Builder getSpeechContextsBuilder(
2140         int index) {
2141       return getSpeechContextsFieldBuilder().getBuilder(index);
2142     }
2143     /**
2144      *
2145      *
2146      * <pre>
2147      * Context information to assist speech recognition.
2148      * See [the Cloud Speech
2149      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2150      * for more details.
2151      * </pre>
2152      *
2153      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2154      */
2155     public com.google.cloud.dialogflow.v2.SpeechContextOrBuilder getSpeechContextsOrBuilder(
2156         int index) {
2157       if (speechContextsBuilder_ == null) {
2158         return speechContexts_.get(index);
2159       } else {
2160         return speechContextsBuilder_.getMessageOrBuilder(index);
2161       }
2162     }
2163     /**
2164      *
2165      *
2166      * <pre>
2167      * Context information to assist speech recognition.
2168      * See [the Cloud Speech
2169      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2170      * for more details.
2171      * </pre>
2172      *
2173      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2174      */
2175     public java.util.List<? extends com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
2176         getSpeechContextsOrBuilderList() {
2177       if (speechContextsBuilder_ != null) {
2178         return speechContextsBuilder_.getMessageOrBuilderList();
2179       } else {
2180         return java.util.Collections.unmodifiableList(speechContexts_);
2181       }
2182     }
2183     /**
2184      *
2185      *
2186      * <pre>
2187      * Context information to assist speech recognition.
2188      * See [the Cloud Speech
2189      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2190      * for more details.
2191      * </pre>
2192      *
2193      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2194      */
2195     public com.google.cloud.dialogflow.v2.SpeechContext.Builder addSpeechContextsBuilder() {
2196       return getSpeechContextsFieldBuilder()
2197           .addBuilder(com.google.cloud.dialogflow.v2.SpeechContext.getDefaultInstance());
2198     }
2199     /**
2200      *
2201      *
2202      * <pre>
2203      * Context information to assist speech recognition.
2204      * See [the Cloud Speech
2205      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2206      * for more details.
2207      * </pre>
2208      *
2209      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2210      */
2211     public com.google.cloud.dialogflow.v2.SpeechContext.Builder addSpeechContextsBuilder(
2212         int index) {
2213       return getSpeechContextsFieldBuilder()
2214           .addBuilder(index, com.google.cloud.dialogflow.v2.SpeechContext.getDefaultInstance());
2215     }
2216     /**
2217      *
2218      *
2219      * <pre>
2220      * Context information to assist speech recognition.
2221      * See [the Cloud Speech
2222      * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
2223      * for more details.
2224      * </pre>
2225      *
2226      * <code>repeated .google.cloud.dialogflow.v2.SpeechContext speech_contexts = 11;</code>
2227      */
2228     public java.util.List<com.google.cloud.dialogflow.v2.SpeechContext.Builder>
2229         getSpeechContextsBuilderList() {
2230       return getSpeechContextsFieldBuilder().getBuilderList();
2231     }
2232 
2233     private com.google.protobuf.RepeatedFieldBuilderV3<
2234             com.google.cloud.dialogflow.v2.SpeechContext,
2235             com.google.cloud.dialogflow.v2.SpeechContext.Builder,
2236             com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>
2237         getSpeechContextsFieldBuilder() {
2238       if (speechContextsBuilder_ == null) {
2239         speechContextsBuilder_ =
2240             new com.google.protobuf.RepeatedFieldBuilderV3<
2241                 com.google.cloud.dialogflow.v2.SpeechContext,
2242                 com.google.cloud.dialogflow.v2.SpeechContext.Builder,
2243                 com.google.cloud.dialogflow.v2.SpeechContextOrBuilder>(
2244                 speechContexts_,
2245                 ((bitField0_ & 0x00000020) != 0),
2246                 getParentForChildren(),
2247                 isClean());
2248         speechContexts_ = null;
2249       }
2250       return speechContextsBuilder_;
2251     }
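    // Illustrative sketch (assumes SpeechContext exposes addPhrases/setBoost builder
    // methods as in the google.cloud.dialogflow.v2 proto): biasing recognition toward
    // domain phrases instead of the deprecated phrase_hints field.
    //
    //   builder.addSpeechContexts(
    //       SpeechContext.newBuilder()
    //           .addPhrases("routing number")
    //           .setBoost(10.0f)
    //           .build());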
2252 
2253     private java.lang.Object model_ = "";
2254     /**
2255      *
2256      *
2257      * <pre>
2258      * Which Speech model to select for the given request. Select the
2259      * model best suited to your domain to get best results. If a model is not
2260      * explicitly specified, then we auto-select a model based on the parameters
2261      * in the InputAudioConfig.
2262      * If enhanced speech model is enabled for the agent and an enhanced
2263      * version of the specified model for the language does not exist, then the
2264      * speech is recognized using the standard version of the specified model.
2265      * Refer to
2266      * [Cloud Speech API
2267      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2268      * for more details.
2269      * If you specify a model, the following models typically have the best
2270      * performance:
2271      * - phone_call (best for Agent Assist and telephony)
2272      * - latest_short (best for Dialogflow non-telephony)
2273      * - command_and_search (best for very short utterances and commands)
2274      * </pre>
2275      *
2276      * <code>string model = 7;</code>
2277      *
2278      * @return The model.
2279      */
2280     public java.lang.String getModel() {
2281       java.lang.Object ref = model_;
2282       if (!(ref instanceof java.lang.String)) {
2283         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
2284         java.lang.String s = bs.toStringUtf8();
2285         model_ = s;
2286         return s;
2287       } else {
2288         return (java.lang.String) ref;
2289       }
2290     }
2291     /**
2292      *
2293      *
2294      * <pre>
2295      * Which Speech model to select for the given request. Select the
2296      * model best suited to your domain to get best results. If a model is not
2297      * explicitly specified, then we auto-select a model based on the parameters
2298      * in the InputAudioConfig.
2299      * If enhanced speech model is enabled for the agent and an enhanced
2300      * version of the specified model for the language does not exist, then the
2301      * speech is recognized using the standard version of the specified model.
2302      * Refer to
2303      * [Cloud Speech API
2304      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2305      * for more details.
2306      * If you specify a model, the following models typically have the best
2307      * performance:
2308      * - phone_call (best for Agent Assist and telephony)
2309      * - latest_short (best for Dialogflow non-telephony)
2310      * - command_and_search (best for very short utterances and commands)
2311      * </pre>
2312      *
2313      * <code>string model = 7;</code>
2314      *
2315      * @return The bytes for model.
2316      */
2317     public com.google.protobuf.ByteString getModelBytes() {
2318       java.lang.Object ref = model_;
2319       if (ref instanceof String) {
2320         com.google.protobuf.ByteString b =
2321             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
2322         model_ = b;
2323         return b;
2324       } else {
2325         return (com.google.protobuf.ByteString) ref;
2326       }
2327     }
2328     /**
2329      *
2330      *
2331      * <pre>
2332      * Which Speech model to select for the given request. Select the
2333      * model best suited to your domain to get best results. If a model is not
2334      * explicitly specified, then we auto-select a model based on the parameters
2335      * in the InputAudioConfig.
2336      * If enhanced speech model is enabled for the agent and an enhanced
2337      * version of the specified model for the language does not exist, then the
2338      * speech is recognized using the standard version of the specified model.
2339      * Refer to
2340      * [Cloud Speech API
2341      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2342      * for more details.
2343      * If you specify a model, the following models typically have the best
2344      * performance:
2345      * - phone_call (best for Agent Assist and telephony)
2346      * - latest_short (best for Dialogflow non-telephony)
2347      * - command_and_search (best for very short utterances and commands)
2348      * </pre>
2349      *
2350      * <code>string model = 7;</code>
2351      *
2352      * @param value The model to set.
2353      * @return This builder for chaining.
2354      */
2355     public Builder setModel(java.lang.String value) {
2356       if (value == null) {
2357         throw new NullPointerException();
2358       }
2359       model_ = value;
2360       bitField0_ |= 0x00000040;
2361       onChanged();
2362       return this;
2363     }
2364     /**
2365      *
2366      *
2367      * <pre>
2368      * Which Speech model to select for the given request. Select the
2369      * model best suited to your domain to get best results. If a model is not
2370      * explicitly specified, then we auto-select a model based on the parameters
2371      * in the InputAudioConfig.
2372      * If enhanced speech model is enabled for the agent and an enhanced
2373      * version of the specified model for the language does not exist, then the
2374      * speech is recognized using the standard version of the specified model.
2375      * Refer to
2376      * [Cloud Speech API
2377      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2378      * for more details.
2379      * If you specify a model, the following models typically have the best
2380      * performance:
2381      * - phone_call (best for Agent Assist and telephony)
2382      * - latest_short (best for Dialogflow non-telephony)
2383      * - command_and_search (best for very short utterances and commands)
2384      * </pre>
2385      *
2386      * <code>string model = 7;</code>
2387      *
2388      * @return This builder for chaining.
2389      */
2390     public Builder clearModel() {
2391       model_ = getDefaultInstance().getModel();
2392       bitField0_ = (bitField0_ & ~0x00000040);
2393       onChanged();
2394       return this;
2395     }
2396     /**
2397      *
2398      *
2399      * <pre>
2400      * Which Speech model to select for the given request. Select the
2401      * model best suited to your domain to get best results. If a model is not
2402      * explicitly specified, then we auto-select a model based on the parameters
2403      * in the InputAudioConfig.
2404      * If enhanced speech model is enabled for the agent and an enhanced
2405      * version of the specified model for the language does not exist, then the
2406      * speech is recognized using the standard version of the specified model.
2407      * Refer to
2408      * [Cloud Speech API
2409      * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
2410      * for more details.
2411      * If you specify a model, the following models typically have the best
2412      * performance:
2413      * - phone_call (best for Agent Assist and telephony)
2414      * - latest_short (best for Dialogflow non-telephony)
2415      * - command_and_search (best for very short utterances and commands)
2416      * </pre>
2417      *
2418      * <code>string model = 7;</code>
2419      *
2420      * @param value The bytes for model to set.
2421      * @return This builder for chaining.
2422      */
2423     public Builder setModelBytes(com.google.protobuf.ByteString value) {
2424       if (value == null) {
2425         throw new NullPointerException();
2426       }
2427       checkByteStringIsUtf8(value);
2428       model_ = value;
2429       bitField0_ |= 0x00000040;
2430       onChanged();
2431       return this;
2432     }
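    // Sketch only: model names are plain strings; "phone_call" is one of the models
    // the comment above calls out for telephony traffic.
    //
    //   builder.setModel("phone_call");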
2433 
2434     private int modelVariant_ = 0;
2435     /**
2436      *
2437      *
2438      * <pre>
2439      * Which variant of the [Speech
2440      * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
2441      * </pre>
2442      *
2443      * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
2444      *
2445      * @return The enum numeric value on the wire for modelVariant.
2446      */
2447     @java.lang.Override
2448     public int getModelVariantValue() {
2449       return modelVariant_;
2450     }
2451     /**
2452      *
2453      *
2454      * <pre>
2455      * Which variant of the [Speech
2456      * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
2457      * </pre>
2458      *
2459      * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
2460      *
2461      * @param value The enum numeric value on the wire for modelVariant to set.
2462      * @return This builder for chaining.
2463      */
2464     public Builder setModelVariantValue(int value) {
2465       modelVariant_ = value;
2466       bitField0_ |= 0x00000080;
2467       onChanged();
2468       return this;
2469     }
2470     /**
2471      *
2472      *
2473      * <pre>
2474      * Which variant of the [Speech
2475      * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
2476      * </pre>
2477      *
2478      * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
2479      *
2480      * @return The modelVariant.
2481      */
2482     @java.lang.Override
2483     public com.google.cloud.dialogflow.v2.SpeechModelVariant getModelVariant() {
2484       com.google.cloud.dialogflow.v2.SpeechModelVariant result =
2485           com.google.cloud.dialogflow.v2.SpeechModelVariant.forNumber(modelVariant_);
2486       return result == null
2487           ? com.google.cloud.dialogflow.v2.SpeechModelVariant.UNRECOGNIZED
2488           : result;
2489     }
2490     /**
2491      *
2492      *
2493      * <pre>
2494      * Which variant of the [Speech
2495      * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
2496      * </pre>
2497      *
2498      * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
2499      *
2500      * @param value The modelVariant to set.
2501      * @return This builder for chaining.
2502      */
2503     public Builder setModelVariant(com.google.cloud.dialogflow.v2.SpeechModelVariant value) {
2504       if (value == null) {
2505         throw new NullPointerException();
2506       }
2507       bitField0_ |= 0x00000080;
2508       modelVariant_ = value.getNumber();
2509       onChanged();
2510       return this;
2511     }
2512     /**
2513      *
2514      *
2515      * <pre>
2516      * Which variant of the [Speech
2517      * model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
2518      * </pre>
2519      *
2520      * <code>.google.cloud.dialogflow.v2.SpeechModelVariant model_variant = 10;</code>
2521      *
2522      * @return This builder for chaining.
2523      */
2524     public Builder clearModelVariant() {
2525       bitField0_ = (bitField0_ & ~0x00000080);
2526       modelVariant_ = 0;
2527       onChanged();
2528       return this;
2529     }
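    // Sketch only (assumes the USE_ENHANCED constant exists on SpeechModelVariant):
    // requesting the enhanced variant of the selected model.
    //
    //   builder.setModelVariant(SpeechModelVariant.USE_ENHANCED);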
2530 
2531     private boolean singleUtterance_;
2532     /**
2533      *
2534      *
2535      * <pre>
2536      * If `false` (default), recognition does not cease until the
2537      * client closes the stream.
2538      * If `true`, the recognizer will detect a single spoken utterance in input
2539      * audio. Recognition ceases when it detects the audio's voice has
2540      * stopped or paused. In this case, once a detected intent is received, the
2541      * client should close the stream and start a new request with a new stream as
2542      * needed.
2543      * Note: This setting is relevant only for streaming methods.
2544      * Note: When specified, InputAudioConfig.single_utterance takes precedence
2545      * over StreamingDetectIntentRequest.single_utterance.
2546      * </pre>
2547      *
2548      * <code>bool single_utterance = 8;</code>
2549      *
2550      * @return The singleUtterance.
2551      */
2552     @java.lang.Override
2553     public boolean getSingleUtterance() {
2554       return singleUtterance_;
2555     }
2556     /**
2557      *
2558      *
2559      * <pre>
2560      * If `false` (default), recognition does not cease until the
2561      * client closes the stream.
2562      * If `true`, the recognizer will detect a single spoken utterance in input
2563      * audio. Recognition ceases when it detects the audio's voice has
2564      * stopped or paused. In this case, once a detected intent is received, the
2565      * client should close the stream and start a new request with a new stream as
2566      * needed.
2567      * Note: This setting is relevant only for streaming methods.
2568      * Note: When specified, InputAudioConfig.single_utterance takes precedence
2569      * over StreamingDetectIntentRequest.single_utterance.
2570      * </pre>
2571      *
2572      * <code>bool single_utterance = 8;</code>
2573      *
2574      * @param value The singleUtterance to set.
2575      * @return This builder for chaining.
2576      */
2577     public Builder setSingleUtterance(boolean value) {
2578 
2579       singleUtterance_ = value;
2580       bitField0_ |= 0x00000100;
2581       onChanged();
2582       return this;
2583     }
2584     /**
2585      *
2586      *
2587      * <pre>
2588      * If `false` (default), recognition does not cease until the
2589      * client closes the stream.
2590      * If `true`, the recognizer will detect a single spoken utterance in input
2591      * audio. Recognition ceases when it detects the audio's voice has
2592      * stopped or paused. In this case, once a detected intent is received, the
2593      * client should close the stream and start a new request with a new stream as
2594      * needed.
2595      * Note: This setting is relevant only for streaming methods.
2596      * Note: When specified, InputAudioConfig.single_utterance takes precedence
2597      * over StreamingDetectIntentRequest.single_utterance.
2598      * </pre>
2599      *
2600      * <code>bool single_utterance = 8;</code>
2601      *
2602      * @return This builder for chaining.
2603      */
2604     public Builder clearSingleUtterance() {
2605       bitField0_ = (bitField0_ & ~0x00000100);
2606       singleUtterance_ = false;
2607       onChanged();
2608       return this;
2609     }
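    // Sketch only: relevant to streaming requests, where it tells the recognizer to
    // stop after the first detected utterance instead of waiting for the client to
    // close the stream.
    //
    //   builder.setSingleUtterance(true);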
2610 
2611     private boolean disableNoSpeechRecognizedEvent_;
2612     /**
2613      *
2614      *
2615      * <pre>
2616      * Only used in
2617      * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
2618      * and
2619      * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
2620      * If `false` and recognition doesn't return any result, trigger
2621      * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
2622      * </pre>
2623      *
2624      * <code>bool disable_no_speech_recognized_event = 14;</code>
2625      *
2626      * @return The disableNoSpeechRecognizedEvent.
2627      */
2628     @java.lang.Override
2629     public boolean getDisableNoSpeechRecognizedEvent() {
2630       return disableNoSpeechRecognizedEvent_;
2631     }
2632     /**
2633      *
2634      *
2635      * <pre>
2636      * Only used in
2637      * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
2638      * and
2639      * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
2640      * If `false` and recognition doesn't return any result, trigger
2641      * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
2642      * </pre>
2643      *
2644      * <code>bool disable_no_speech_recognized_event = 14;</code>
2645      *
2646      * @param value The disableNoSpeechRecognizedEvent to set.
2647      * @return This builder for chaining.
2648      */
2649     public Builder setDisableNoSpeechRecognizedEvent(boolean value) {
2650 
2651       disableNoSpeechRecognizedEvent_ = value;
2652       bitField0_ |= 0x00000200;
2653       onChanged();
2654       return this;
2655     }
2656     /**
2657      *
2658      *
2659      * <pre>
2660      * Only used in
2661      * [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
2662      * and
2663      * [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
2664      * If `false` and recognition doesn't return any result, trigger
2665      * `NO_SPEECH_RECOGNIZED` event to Dialogflow agent.
2666      * </pre>
2667      *
2668      * <code>bool disable_no_speech_recognized_event = 14;</code>
2669      *
2670      * @return This builder for chaining.
2671      */
2672     public Builder clearDisableNoSpeechRecognizedEvent() {
2673       bitField0_ = (bitField0_ & ~0x00000200);
2674       disableNoSpeechRecognizedEvent_ = false;
2675       onChanged();
2676       return this;
2677     }
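    // Sketch only: meaningful for AnalyzeContent/StreamingAnalyzeContent callers that
    // want to suppress the NO_SPEECH_RECOGNIZED event described above.
    //
    //   builder.setDisableNoSpeechRecognizedEvent(true);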
2678 
2679     private boolean enableAutomaticPunctuation_;
2680     /**
2681      *
2682      *
2683      * <pre>
2684      * Enable automatic punctuation option at the speech backend.
2685      * </pre>
2686      *
2687      * <code>bool enable_automatic_punctuation = 17;</code>
2688      *
2689      * @return The enableAutomaticPunctuation.
2690      */
2691     @java.lang.Override
2692     public boolean getEnableAutomaticPunctuation() {
2693       return enableAutomaticPunctuation_;
2694     }
2695     /**
2696      *
2697      *
2698      * <pre>
2699      * Enable automatic punctuation option at the speech backend.
2700      * </pre>
2701      *
2702      * <code>bool enable_automatic_punctuation = 17;</code>
2703      *
2704      * @param value The enableAutomaticPunctuation to set.
2705      * @return This builder for chaining.
2706      */
2707     public Builder setEnableAutomaticPunctuation(boolean value) {
2708 
2709       enableAutomaticPunctuation_ = value;
2710       bitField0_ |= 0x00000400;
2711       onChanged();
2712       return this;
2713     }
2714     /**
2715      *
2716      *
2717      * <pre>
2718      * Enable automatic punctuation option at the speech backend.
2719      * </pre>
2720      *
2721      * <code>bool enable_automatic_punctuation = 17;</code>
2722      *
2723      * @return This builder for chaining.
2724      */
2725     public Builder clearEnableAutomaticPunctuation() {
2726       bitField0_ = (bitField0_ & ~0x00000400);
2727       enableAutomaticPunctuation_ = false;
2728       onChanged();
2729       return this;
2730     }
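    // Sketch only: asks the speech backend to punctuate transcripts.
    //
    //   builder.setEnableAutomaticPunctuation(true);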
2731 
2732     @java.lang.Override
2733     public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
2734       return super.setUnknownFields(unknownFields);
2735     }
2736 
2737     @java.lang.Override
2738     public final Builder mergeUnknownFields(
2739         final com.google.protobuf.UnknownFieldSet unknownFields) {
2740       return super.mergeUnknownFields(unknownFields);
2741     }
2742 
2743     // @@protoc_insertion_point(builder_scope:google.cloud.dialogflow.v2.InputAudioConfig)
2744   }
2745 
2746   // @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.InputAudioConfig)
2747   private static final com.google.cloud.dialogflow.v2.InputAudioConfig DEFAULT_INSTANCE;
2748 
2749   static {
2750     DEFAULT_INSTANCE = new com.google.cloud.dialogflow.v2.InputAudioConfig();
2751   }
2752 
2753   public static com.google.cloud.dialogflow.v2.InputAudioConfig getDefaultInstance() {
2754     return DEFAULT_INSTANCE;
2755   }
2756 
2757   private static final com.google.protobuf.Parser<InputAudioConfig> PARSER =
2758       new com.google.protobuf.AbstractParser<InputAudioConfig>() {
2759         @java.lang.Override
2760         public InputAudioConfig parsePartialFrom(
2761             com.google.protobuf.CodedInputStream input,
2762             com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2763             throws com.google.protobuf.InvalidProtocolBufferException {
2764           Builder builder = newBuilder();
2765           try {
2766             builder.mergeFrom(input, extensionRegistry);
2767           } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2768             throw e.setUnfinishedMessage(builder.buildPartial());
2769           } catch (com.google.protobuf.UninitializedMessageException e) {
2770             throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
2771           } catch (java.io.IOException e) {
2772             throw new com.google.protobuf.InvalidProtocolBufferException(e)
2773                 .setUnfinishedMessage(builder.buildPartial());
2774           }
2775           return builder.buildPartial();
2776         }
2777       };
2778 
2779   public static com.google.protobuf.Parser<InputAudioConfig> parser() {
2780     return PARSER;
2781   }
2782 
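  // Illustrative round trip, not generated code: serialize a config and parse it back
  // through the PARSER instance defined above (exposed via the parser() accessor).
  //
  //   byte[] bytes = config.toByteArray();
  //   InputAudioConfig copy = InputAudioConfig.parser().parseFrom(bytes);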
2783   @java.lang.Override
2784   public com.google.protobuf.Parser<InputAudioConfig> getParserForType() {
2785     return PARSER;
2786   }
2787 
2788   @java.lang.Override
2789   public com.google.cloud.dialogflow.v2.InputAudioConfig getDefaultInstanceForType() {
2790     return DEFAULT_INSTANCE;
2791   }
2792 }
2793