1 /*
2  * Copyright 2020 Google LLC
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     https://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 // Generated by the protocol buffer compiler.  DO NOT EDIT!
17 // source: google/cloud/texttospeech/v1beta1/cloud_tts.proto
18 
19 package com.google.cloud.texttospeech.v1beta1;
20 
21 /**
22  *
23  *
24  * <pre>
25  * The message returned to the client by the `SynthesizeSpeech` method.
26  * </pre>
27  *
28  * Protobuf type {@code google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse}
29  */
30 public final class SynthesizeSpeechResponse extends com.google.protobuf.GeneratedMessageV3
31     implements
32     // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse)
33     SynthesizeSpeechResponseOrBuilder {
34   private static final long serialVersionUID = 0L;
35   // Use SynthesizeSpeechResponse.newBuilder() to construct.
36   private SynthesizeSpeechResponse(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
37     super(builder);
38   }
39 
40   private SynthesizeSpeechResponse() {
41     audioContent_ = com.google.protobuf.ByteString.EMPTY;
42     timepoints_ = java.util.Collections.emptyList();
43   }
44 
45   @java.lang.Override
46   @SuppressWarnings({"unused"})
47   protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
48     return new SynthesizeSpeechResponse();
49   }
50 
51   @java.lang.Override
52   public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
53     return this.unknownFields;
54   }
55 
56   public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
57     return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
58         .internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor;
59   }
60 
61   @java.lang.Override
62   protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
63       internalGetFieldAccessorTable() {
64     return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
65         .internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_fieldAccessorTable
66         .ensureFieldAccessorsInitialized(
67             com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.class,
68             com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.Builder.class);
69   }
70 
71   public static final int AUDIO_CONTENT_FIELD_NUMBER = 1;
72   private com.google.protobuf.ByteString audioContent_ = com.google.protobuf.ByteString.EMPTY;
73   /**
74    *
75    *
76    * <pre>
77    * The audio data bytes encoded as specified in the request, including the
78    * header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
79    * For LINEAR16 audio, we include the WAV header. Note: as
80    * with all bytes fields, protobuffers use a pure binary representation,
81    * whereas JSON representations use base64.
82    * </pre>
83    *
84    * <code>bytes audio_content = 1;</code>
85    *
86    * @return The audioContent.
87    */
88   @java.lang.Override
89   public com.google.protobuf.ByteString getAudioContent() {
90     return audioContent_;
91   }
92 
93   public static final int TIMEPOINTS_FIELD_NUMBER = 2;
94 
95   @SuppressWarnings("serial")
96   private java.util.List<com.google.cloud.texttospeech.v1beta1.Timepoint> timepoints_;
97   /**
98    *
99    *
100    * <pre>
101    * A link between a position in the original request input and a corresponding
102    * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
103    * </pre>
104    *
105    * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
106    */
107   @java.lang.Override
108   public java.util.List<com.google.cloud.texttospeech.v1beta1.Timepoint> getTimepointsList() {
109     return timepoints_;
110   }
111   /**
112    *
113    *
114    * <pre>
115    * A link between a position in the original request input and a corresponding
116    * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
117    * </pre>
118    *
119    * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
120    */
121   @java.lang.Override
122   public java.util.List<? extends com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder>
123       getTimepointsOrBuilderList() {
124     return timepoints_;
125   }
126   /**
127    *
128    *
129    * <pre>
130    * A link between a position in the original request input and a corresponding
131    * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
132    * </pre>
133    *
134    * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
135    */
136   @java.lang.Override
137   public int getTimepointsCount() {
138     return timepoints_.size();
139   }
140   /**
141    *
142    *
143    * <pre>
144    * A link between a position in the original request input and a corresponding
145    * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
146    * </pre>
147    *
148    * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
149    */
150   @java.lang.Override
151   public com.google.cloud.texttospeech.v1beta1.Timepoint getTimepoints(int index) {
152     return timepoints_.get(index);
153   }
154   /**
155    *
156    *
157    * <pre>
158    * A link between a position in the original request input and a corresponding
159    * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
160    * </pre>
161    *
162    * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
163    */
164   @java.lang.Override
165   public com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder getTimepointsOrBuilder(
166       int index) {
167     return timepoints_.get(index);
168   }
169 
170   public static final int AUDIO_CONFIG_FIELD_NUMBER = 4;
171   private com.google.cloud.texttospeech.v1beta1.AudioConfig audioConfig_;
172   /**
173    *
174    *
175    * <pre>
176    * The audio metadata of `audio_content`.
177    * </pre>
178    *
179    * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
180    *
181    * @return Whether the audioConfig field is set.
182    */
183   @java.lang.Override
184   public boolean hasAudioConfig() {
185     return audioConfig_ != null;
186   }
187   /**
188    *
189    *
190    * <pre>
191    * The audio metadata of `audio_content`.
192    * </pre>
193    *
194    * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
195    *
196    * @return The audioConfig.
197    */
198   @java.lang.Override
199   public com.google.cloud.texttospeech.v1beta1.AudioConfig getAudioConfig() {
200     return audioConfig_ == null
201         ? com.google.cloud.texttospeech.v1beta1.AudioConfig.getDefaultInstance()
202         : audioConfig_;
203   }
204   /**
205    *
206    *
207    * <pre>
208    * The audio metadata of `audio_content`.
209    * </pre>
210    *
211    * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
212    */
213   @java.lang.Override
214   public com.google.cloud.texttospeech.v1beta1.AudioConfigOrBuilder getAudioConfigOrBuilder() {
215     return audioConfig_ == null
216         ? com.google.cloud.texttospeech.v1beta1.AudioConfig.getDefaultInstance()
217         : audioConfig_;
218   }
219 
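  // -------------------------------------------------------------------------
  // Illustrative only, not part of the generated API: a minimal sketch of how a
  // client might consume this response, assuming it came back from a call such
  // as TextToSpeechClient.synthesizeSpeech(...) in the google-cloud-texttospeech
  // client library. The `response` variable and the output path are hypothetical.
  //
  //   com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse response = ...;
  //
  //   // Per the Javadoc above, audio_content already includes the container
  //   // header (e.g. the WAV header for LINEAR16), so the bytes can be written
  //   // straight to a file.
  //   try (java.io.OutputStream out = new java.io.FileOutputStream("output.wav")) {
  //     response.getAudioContent().writeTo(out);
  //   }
  //
  //   // Timepoints are only returned for SSML input that uses <mark> tags; the
  //   // getMarkName()/getTimeSeconds() accessors are assumed from Timepoint's
  //   // mark_name and time_seconds fields.
  //   for (com.google.cloud.texttospeech.v1beta1.Timepoint tp : response.getTimepointsList()) {
  //     System.out.println(tp.getMarkName() + " -> " + tp.getTimeSeconds() + "s");
  //   }
  // -------------------------------------------------------------------------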
220   private byte memoizedIsInitialized = -1;
221 
222   @java.lang.Override
223   public final boolean isInitialized() {
224     byte isInitialized = memoizedIsInitialized;
225     if (isInitialized == 1) return true;
226     if (isInitialized == 0) return false;
227 
228     memoizedIsInitialized = 1;
229     return true;
230   }
231 
232   @java.lang.Override
233   public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
234     if (!audioContent_.isEmpty()) {
235       output.writeBytes(1, audioContent_);
236     }
237     for (int i = 0; i < timepoints_.size(); i++) {
238       output.writeMessage(2, timepoints_.get(i));
239     }
240     if (audioConfig_ != null) {
241       output.writeMessage(4, getAudioConfig());
242     }
243     getUnknownFields().writeTo(output);
244   }
245 
246   @java.lang.Override
247   public int getSerializedSize() {
248     int size = memoizedSize;
249     if (size != -1) return size;
250 
251     size = 0;
252     if (!audioContent_.isEmpty()) {
253       size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, audioContent_);
254     }
255     for (int i = 0; i < timepoints_.size(); i++) {
256       size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, timepoints_.get(i));
257     }
258     if (audioConfig_ != null) {
259       size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getAudioConfig());
260     }
261     size += getUnknownFields().getSerializedSize();
262     memoizedSize = size;
263     return size;
264   }
265 
266   @java.lang.Override
267   public boolean equals(final java.lang.Object obj) {
268     if (obj == this) {
269       return true;
270     }
271     if (!(obj instanceof com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse)) {
272       return super.equals(obj);
273     }
274     com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse other =
275         (com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse) obj;
276 
277     if (!getAudioContent().equals(other.getAudioContent())) return false;
278     if (!getTimepointsList().equals(other.getTimepointsList())) return false;
279     if (hasAudioConfig() != other.hasAudioConfig()) return false;
280     if (hasAudioConfig()) {
281       if (!getAudioConfig().equals(other.getAudioConfig())) return false;
282     }
283     if (!getUnknownFields().equals(other.getUnknownFields())) return false;
284     return true;
285   }
286 
287   @java.lang.Override
288   public int hashCode() {
289     if (memoizedHashCode != 0) {
290       return memoizedHashCode;
291     }
292     int hash = 41;
293     hash = (19 * hash) + getDescriptor().hashCode();
294     hash = (37 * hash) + AUDIO_CONTENT_FIELD_NUMBER;
295     hash = (53 * hash) + getAudioContent().hashCode();
296     if (getTimepointsCount() > 0) {
297       hash = (37 * hash) + TIMEPOINTS_FIELD_NUMBER;
298       hash = (53 * hash) + getTimepointsList().hashCode();
299     }
300     if (hasAudioConfig()) {
301       hash = (37 * hash) + AUDIO_CONFIG_FIELD_NUMBER;
302       hash = (53 * hash) + getAudioConfig().hashCode();
303     }
304     hash = (29 * hash) + getUnknownFields().hashCode();
305     memoizedHashCode = hash;
306     return hash;
307   }
308 
309   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
310       java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
311     return PARSER.parseFrom(data);
312   }
313 
314   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
315       java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
316       throws com.google.protobuf.InvalidProtocolBufferException {
317     return PARSER.parseFrom(data, extensionRegistry);
318   }
319 
320   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
321       com.google.protobuf.ByteString data)
322       throws com.google.protobuf.InvalidProtocolBufferException {
323     return PARSER.parseFrom(data);
324   }
325 
326   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
327       com.google.protobuf.ByteString data,
328       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
329       throws com.google.protobuf.InvalidProtocolBufferException {
330     return PARSER.parseFrom(data, extensionRegistry);
331   }
332 
333   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
334       byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
335     return PARSER.parseFrom(data);
336   }
337 
338   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
339       byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
340       throws com.google.protobuf.InvalidProtocolBufferException {
341     return PARSER.parseFrom(data, extensionRegistry);
342   }
343 
344   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
345       java.io.InputStream input) throws java.io.IOException {
346     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
347   }
348 
349   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
350       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
351       throws java.io.IOException {
352     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
353         PARSER, input, extensionRegistry);
354   }
355 
356   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseDelimitedFrom(
357       java.io.InputStream input) throws java.io.IOException {
358     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
359   }
360 
361   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseDelimitedFrom(
362       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
363       throws java.io.IOException {
364     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
365         PARSER, input, extensionRegistry);
366   }
367 
368   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
369       com.google.protobuf.CodedInputStream input) throws java.io.IOException {
370     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
371   }
372 
373   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse parseFrom(
374       com.google.protobuf.CodedInputStream input,
375       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
376       throws java.io.IOException {
377     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
378         PARSER, input, extensionRegistry);
379   }
380 
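  // -------------------------------------------------------------------------
  // Illustrative only: a minimal sketch of round-tripping this message through
  // the parseFrom(...) overloads above, e.g. when the response has been cached
  // as raw protobuf bytes. `response` and `cached` are hypothetical variables;
  // parseFrom(byte[]) throws InvalidProtocolBufferException on malformed input.
  //
  //   byte[] cached = response.toByteArray();
  //   com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse restored =
  //       com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.parseFrom(cached);
  // -------------------------------------------------------------------------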
381   @java.lang.Override
382   public Builder newBuilderForType() {
383     return newBuilder();
384   }
385 
386   public static Builder newBuilder() {
387     return DEFAULT_INSTANCE.toBuilder();
388   }
389 
390   public static Builder newBuilder(
391       com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse prototype) {
392     return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
393   }
394 
395   @java.lang.Override
396   public Builder toBuilder() {
397     return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
398   }
399 
400   @java.lang.Override
401   protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
402     Builder builder = new Builder(parent);
403     return builder;
404   }
405   /**
406    *
407    *
408    * <pre>
409    * The message returned to the client by the `SynthesizeSpeech` method.
410    * </pre>
411    *
412    * Protobuf type {@code google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse}
413    */
414   public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
415       implements
416       // @@protoc_insertion_point(builder_implements:google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse)
417       com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponseOrBuilder {
418     public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
419       return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
420           .internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor;
421     }
422 
423     @java.lang.Override
424     protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
425         internalGetFieldAccessorTable() {
426       return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
427           .internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_fieldAccessorTable
428           .ensureFieldAccessorsInitialized(
429               com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.class,
430               com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.Builder.class);
431     }
432 
433     // Construct using com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.newBuilder()
434     private Builder() {}
435 
436     private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
437       super(parent);
438     }
439 
440     @java.lang.Override
441     public Builder clear() {
442       super.clear();
443       bitField0_ = 0;
444       audioContent_ = com.google.protobuf.ByteString.EMPTY;
445       if (timepointsBuilder_ == null) {
446         timepoints_ = java.util.Collections.emptyList();
447       } else {
448         timepoints_ = null;
449         timepointsBuilder_.clear();
450       }
451       bitField0_ = (bitField0_ & ~0x00000002);
452       audioConfig_ = null;
453       if (audioConfigBuilder_ != null) {
454         audioConfigBuilder_.dispose();
455         audioConfigBuilder_ = null;
456       }
457       return this;
458     }
459 
460     @java.lang.Override
461     public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
462       return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
463           .internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor;
464     }
465 
466     @java.lang.Override
467     public com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse
468         getDefaultInstanceForType() {
469       return com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.getDefaultInstance();
470     }
471 
472     @java.lang.Override
473     public com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse build() {
474       com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse result = buildPartial();
475       if (!result.isInitialized()) {
476         throw newUninitializedMessageException(result);
477       }
478       return result;
479     }
480 
481     @java.lang.Override
482     public com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse buildPartial() {
483       com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse result =
484           new com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse(this);
485       buildPartialRepeatedFields(result);
486       if (bitField0_ != 0) {
487         buildPartial0(result);
488       }
489       onBuilt();
490       return result;
491     }
492 
493     private void buildPartialRepeatedFields(
494         com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse result) {
495       if (timepointsBuilder_ == null) {
496         if (((bitField0_ & 0x00000002) != 0)) {
497           timepoints_ = java.util.Collections.unmodifiableList(timepoints_);
498           bitField0_ = (bitField0_ & ~0x00000002);
499         }
500         result.timepoints_ = timepoints_;
501       } else {
502         result.timepoints_ = timepointsBuilder_.build();
503       }
504     }
505 
506     private void buildPartial0(
507         com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse result) {
508       int from_bitField0_ = bitField0_;
509       if (((from_bitField0_ & 0x00000001) != 0)) {
510         result.audioContent_ = audioContent_;
511       }
512       if (((from_bitField0_ & 0x00000004) != 0)) {
513         result.audioConfig_ =
514             audioConfigBuilder_ == null ? audioConfig_ : audioConfigBuilder_.build();
515       }
516     }
517 
518     @java.lang.Override
519     public Builder clone() {
520       return super.clone();
521     }
522 
523     @java.lang.Override
524     public Builder setField(
525         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
526       return super.setField(field, value);
527     }
528 
529     @java.lang.Override
530     public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
531       return super.clearField(field);
532     }
533 
534     @java.lang.Override
535     public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
536       return super.clearOneof(oneof);
537     }
538 
539     @java.lang.Override
540     public Builder setRepeatedField(
541         com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
542       return super.setRepeatedField(field, index, value);
543     }
544 
545     @java.lang.Override
546     public Builder addRepeatedField(
547         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
548       return super.addRepeatedField(field, value);
549     }
550 
551     @java.lang.Override
552     public Builder mergeFrom(com.google.protobuf.Message other) {
553       if (other instanceof com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse) {
554         return mergeFrom((com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse) other);
555       } else {
556         super.mergeFrom(other);
557         return this;
558       }
559     }
560 
561     public Builder mergeFrom(com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse other) {
562       if (other
563           == com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.getDefaultInstance())
564         return this;
565       if (other.getAudioContent() != com.google.protobuf.ByteString.EMPTY) {
566         setAudioContent(other.getAudioContent());
567       }
568       if (timepointsBuilder_ == null) {
569         if (!other.timepoints_.isEmpty()) {
570           if (timepoints_.isEmpty()) {
571             timepoints_ = other.timepoints_;
572             bitField0_ = (bitField0_ & ~0x00000002);
573           } else {
574             ensureTimepointsIsMutable();
575             timepoints_.addAll(other.timepoints_);
576           }
577           onChanged();
578         }
579       } else {
580         if (!other.timepoints_.isEmpty()) {
581           if (timepointsBuilder_.isEmpty()) {
582             timepointsBuilder_.dispose();
583             timepointsBuilder_ = null;
584             timepoints_ = other.timepoints_;
585             bitField0_ = (bitField0_ & ~0x00000002);
586             timepointsBuilder_ =
587                 com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
588                     ? getTimepointsFieldBuilder()
589                     : null;
590           } else {
591             timepointsBuilder_.addAllMessages(other.timepoints_);
592           }
593         }
594       }
595       if (other.hasAudioConfig()) {
596         mergeAudioConfig(other.getAudioConfig());
597       }
598       this.mergeUnknownFields(other.getUnknownFields());
599       onChanged();
600       return this;
601     }
602 
603     @java.lang.Override
604     public final boolean isInitialized() {
605       return true;
606     }
607 
608     @java.lang.Override
609     public Builder mergeFrom(
610         com.google.protobuf.CodedInputStream input,
611         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
612         throws java.io.IOException {
613       if (extensionRegistry == null) {
614         throw new java.lang.NullPointerException();
615       }
616       try {
617         boolean done = false;
618         while (!done) {
619           int tag = input.readTag();
620           switch (tag) {
621             case 0:
622               done = true;
623               break;
624             case 10:
625               {
626                 audioContent_ = input.readBytes();
627                 bitField0_ |= 0x00000001;
628                 break;
629               } // case 10
630             case 18:
631               {
632                 com.google.cloud.texttospeech.v1beta1.Timepoint m =
633                     input.readMessage(
634                         com.google.cloud.texttospeech.v1beta1.Timepoint.parser(),
635                         extensionRegistry);
636                 if (timepointsBuilder_ == null) {
637                   ensureTimepointsIsMutable();
638                   timepoints_.add(m);
639                 } else {
640                   timepointsBuilder_.addMessage(m);
641                 }
642                 break;
643               } // case 18
644             case 34:
645               {
646                 input.readMessage(getAudioConfigFieldBuilder().getBuilder(), extensionRegistry);
647                 bitField0_ |= 0x00000004;
648                 break;
649               } // case 34
650             default:
651               {
652                 if (!super.parseUnknownField(input, extensionRegistry, tag)) {
653                   done = true; // was an endgroup tag
654                 }
655                 break;
656               } // default:
657           } // switch (tag)
658         } // while (!done)
659       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
660         throw e.unwrapIOException();
661       } finally {
662         onChanged();
663       } // finally
664       return this;
665     }
666 
667     private int bitField0_;
668 
669     private com.google.protobuf.ByteString audioContent_ = com.google.protobuf.ByteString.EMPTY;
670     /**
671      *
672      *
673      * <pre>
674      * The audio data bytes encoded as specified in the request, including the
675      * header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
676      * For LINEAR16 audio, we include the WAV header. Note: as
677      * with all bytes fields, protobuffers use a pure binary representation,
678      * whereas JSON representations use base64.
679      * </pre>
680      *
681      * <code>bytes audio_content = 1;</code>
682      *
683      * @return The audioContent.
684      */
685     @java.lang.Override
686     public com.google.protobuf.ByteString getAudioContent() {
687       return audioContent_;
688     }
689     /**
690      *
691      *
692      * <pre>
693      * The audio data bytes encoded as specified in the request, including the
694      * header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
695      * For LINEAR16 audio, we include the WAV header. Note: as
696      * with all bytes fields, protobuffers use a pure binary representation,
697      * whereas JSON representations use base64.
698      * </pre>
699      *
700      * <code>bytes audio_content = 1;</code>
701      *
702      * @param value The audioContent to set.
703      * @return This builder for chaining.
704      */
705     public Builder setAudioContent(com.google.protobuf.ByteString value) {
706       if (value == null) {
707         throw new NullPointerException();
708       }
709       audioContent_ = value;
710       bitField0_ |= 0x00000001;
711       onChanged();
712       return this;
713     }
714     /**
715      *
716      *
717      * <pre>
718      * The audio data bytes encoded as specified in the request, including the
719      * header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
720      * For LINEAR16 audio, we include the WAV header. Note: as
721      * with all bytes fields, protobuffers use a pure binary representation,
722      * whereas JSON representations use base64.
723      * </pre>
724      *
725      * <code>bytes audio_content = 1;</code>
726      *
727      * @return This builder for chaining.
728      */
729     public Builder clearAudioContent() {
730       bitField0_ = (bitField0_ & ~0x00000001);
731       audioContent_ = getDefaultInstance().getAudioContent();
732       onChanged();
733       return this;
734     }
735 
736     private java.util.List<com.google.cloud.texttospeech.v1beta1.Timepoint> timepoints_ =
737         java.util.Collections.emptyList();
738 
739     private void ensureTimepointsIsMutable() {
740       if (!((bitField0_ & 0x00000002) != 0)) {
741         timepoints_ =
742             new java.util.ArrayList<com.google.cloud.texttospeech.v1beta1.Timepoint>(timepoints_);
743         bitField0_ |= 0x00000002;
744       }
745     }
746 
747     private com.google.protobuf.RepeatedFieldBuilderV3<
748             com.google.cloud.texttospeech.v1beta1.Timepoint,
749             com.google.cloud.texttospeech.v1beta1.Timepoint.Builder,
750             com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder>
751         timepointsBuilder_;
752 
753     /**
754      *
755      *
756      * <pre>
757      * A link between a position in the original request input and a corresponding
758      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
759      * </pre>
760      *
761      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
762      */
763     public java.util.List<com.google.cloud.texttospeech.v1beta1.Timepoint> getTimepointsList() {
764       if (timepointsBuilder_ == null) {
765         return java.util.Collections.unmodifiableList(timepoints_);
766       } else {
767         return timepointsBuilder_.getMessageList();
768       }
769     }
770     /**
771      *
772      *
773      * <pre>
774      * A link between a position in the original request input and a corresponding
775      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
776      * </pre>
777      *
778      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
779      */
780     public int getTimepointsCount() {
781       if (timepointsBuilder_ == null) {
782         return timepoints_.size();
783       } else {
784         return timepointsBuilder_.getCount();
785       }
786     }
787     /**
788      *
789      *
790      * <pre>
791      * A link between a position in the original request input and a corresponding
792      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
793      * </pre>
794      *
795      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
796      */
797     public com.google.cloud.texttospeech.v1beta1.Timepoint getTimepoints(int index) {
798       if (timepointsBuilder_ == null) {
799         return timepoints_.get(index);
800       } else {
801         return timepointsBuilder_.getMessage(index);
802       }
803     }
804     /**
805      *
806      *
807      * <pre>
808      * A link between a position in the original request input and a corresponding
809      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
810      * </pre>
811      *
812      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
813      */
814     public Builder setTimepoints(int index, com.google.cloud.texttospeech.v1beta1.Timepoint value) {
815       if (timepointsBuilder_ == null) {
816         if (value == null) {
817           throw new NullPointerException();
818         }
819         ensureTimepointsIsMutable();
820         timepoints_.set(index, value);
821         onChanged();
822       } else {
823         timepointsBuilder_.setMessage(index, value);
824       }
825       return this;
826     }
827     /**
828      *
829      *
830      * <pre>
831      * A link between a position in the original request input and a corresponding
832      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
833      * </pre>
834      *
835      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
836      */
837     public Builder setTimepoints(
838         int index, com.google.cloud.texttospeech.v1beta1.Timepoint.Builder builderForValue) {
839       if (timepointsBuilder_ == null) {
840         ensureTimepointsIsMutable();
841         timepoints_.set(index, builderForValue.build());
842         onChanged();
843       } else {
844         timepointsBuilder_.setMessage(index, builderForValue.build());
845       }
846       return this;
847     }
848     /**
849      *
850      *
851      * <pre>
852      * A link between a position in the original request input and a corresponding
853      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
854      * </pre>
855      *
856      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
857      */
858     public Builder addTimepoints(com.google.cloud.texttospeech.v1beta1.Timepoint value) {
859       if (timepointsBuilder_ == null) {
860         if (value == null) {
861           throw new NullPointerException();
862         }
863         ensureTimepointsIsMutable();
864         timepoints_.add(value);
865         onChanged();
866       } else {
867         timepointsBuilder_.addMessage(value);
868       }
869       return this;
870     }
871     /**
872      *
873      *
874      * <pre>
875      * A link between a position in the original request input and a corresponding
876      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
877      * </pre>
878      *
879      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
880      */
881     public Builder addTimepoints(int index, com.google.cloud.texttospeech.v1beta1.Timepoint value) {
882       if (timepointsBuilder_ == null) {
883         if (value == null) {
884           throw new NullPointerException();
885         }
886         ensureTimepointsIsMutable();
887         timepoints_.add(index, value);
888         onChanged();
889       } else {
890         timepointsBuilder_.addMessage(index, value);
891       }
892       return this;
893     }
894     /**
895      *
896      *
897      * <pre>
898      * A link between a position in the original request input and a corresponding
899      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
900      * </pre>
901      *
902      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
903      */
904     public Builder addTimepoints(
905         com.google.cloud.texttospeech.v1beta1.Timepoint.Builder builderForValue) {
906       if (timepointsBuilder_ == null) {
907         ensureTimepointsIsMutable();
908         timepoints_.add(builderForValue.build());
909         onChanged();
910       } else {
911         timepointsBuilder_.addMessage(builderForValue.build());
912       }
913       return this;
914     }
915     /**
916      *
917      *
918      * <pre>
919      * A link between a position in the original request input and a corresponding
920      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
921      * </pre>
922      *
923      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
924      */
925     public Builder addTimepoints(
926         int index, com.google.cloud.texttospeech.v1beta1.Timepoint.Builder builderForValue) {
927       if (timepointsBuilder_ == null) {
928         ensureTimepointsIsMutable();
929         timepoints_.add(index, builderForValue.build());
930         onChanged();
931       } else {
932         timepointsBuilder_.addMessage(index, builderForValue.build());
933       }
934       return this;
935     }
936     /**
937      *
938      *
939      * <pre>
940      * A link between a position in the original request input and a corresponding
941      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
942      * </pre>
943      *
944      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
945      */
946     public Builder addAllTimepoints(
947         java.lang.Iterable<? extends com.google.cloud.texttospeech.v1beta1.Timepoint> values) {
948       if (timepointsBuilder_ == null) {
949         ensureTimepointsIsMutable();
950         com.google.protobuf.AbstractMessageLite.Builder.addAll(values, timepoints_);
951         onChanged();
952       } else {
953         timepointsBuilder_.addAllMessages(values);
954       }
955       return this;
956     }
957     /**
958      *
959      *
960      * <pre>
961      * A link between a position in the original request input and a corresponding
962      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
963      * </pre>
964      *
965      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
966      */
967     public Builder clearTimepoints() {
968       if (timepointsBuilder_ == null) {
969         timepoints_ = java.util.Collections.emptyList();
970         bitField0_ = (bitField0_ & ~0x00000002);
971         onChanged();
972       } else {
973         timepointsBuilder_.clear();
974       }
975       return this;
976     }
977     /**
978      *
979      *
980      * <pre>
981      * A link between a position in the original request input and a corresponding
982      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
983      * </pre>
984      *
985      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
986      */
987     public Builder removeTimepoints(int index) {
988       if (timepointsBuilder_ == null) {
989         ensureTimepointsIsMutable();
990         timepoints_.remove(index);
991         onChanged();
992       } else {
993         timepointsBuilder_.remove(index);
994       }
995       return this;
996     }
997     /**
998      *
999      *
1000      * <pre>
1001      * A link between a position in the original request input and a corresponding
1002      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
1003      * </pre>
1004      *
1005      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
1006      */
1007     public com.google.cloud.texttospeech.v1beta1.Timepoint.Builder getTimepointsBuilder(int index) {
1008       return getTimepointsFieldBuilder().getBuilder(index);
1009     }
1010     /**
1011      *
1012      *
1013      * <pre>
1014      * A link between a position in the original request input and a corresponding
1015      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
1016      * </pre>
1017      *
1018      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
1019      */
1020     public com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder getTimepointsOrBuilder(
1021         int index) {
1022       if (timepointsBuilder_ == null) {
1023         return timepoints_.get(index);
1024       } else {
1025         return timepointsBuilder_.getMessageOrBuilder(index);
1026       }
1027     }
1028     /**
1029      *
1030      *
1031      * <pre>
1032      * A link between a position in the original request input and a corresponding
1033      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
1034      * </pre>
1035      *
1036      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
1037      */
1038     public java.util.List<? extends com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder>
1039         getTimepointsOrBuilderList() {
1040       if (timepointsBuilder_ != null) {
1041         return timepointsBuilder_.getMessageOrBuilderList();
1042       } else {
1043         return java.util.Collections.unmodifiableList(timepoints_);
1044       }
1045     }
1046     /**
1047      *
1048      *
1049      * <pre>
1050      * A link between a position in the original request input and a corresponding
1051      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
1052      * </pre>
1053      *
1054      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
1055      */
1056     public com.google.cloud.texttospeech.v1beta1.Timepoint.Builder addTimepointsBuilder() {
1057       return getTimepointsFieldBuilder()
1058           .addBuilder(com.google.cloud.texttospeech.v1beta1.Timepoint.getDefaultInstance());
1059     }
1060     /**
1061      *
1062      *
1063      * <pre>
1064      * A link between a position in the original request input and a corresponding
1065      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
1066      * </pre>
1067      *
1068      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
1069      */
1070     public com.google.cloud.texttospeech.v1beta1.Timepoint.Builder addTimepointsBuilder(int index) {
1071       return getTimepointsFieldBuilder()
1072           .addBuilder(index, com.google.cloud.texttospeech.v1beta1.Timepoint.getDefaultInstance());
1073     }
1074     /**
1075      *
1076      *
1077      * <pre>
1078      * A link between a position in the original request input and a corresponding
1079      * time in the output audio. It's only supported via `&lt;mark&gt;` of SSML input.
1080      * </pre>
1081      *
1082      * <code>repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;</code>
1083      */
1084     public java.util.List<com.google.cloud.texttospeech.v1beta1.Timepoint.Builder>
1085         getTimepointsBuilderList() {
1086       return getTimepointsFieldBuilder().getBuilderList();
1087     }
1088 
1089     private com.google.protobuf.RepeatedFieldBuilderV3<
1090             com.google.cloud.texttospeech.v1beta1.Timepoint,
1091             com.google.cloud.texttospeech.v1beta1.Timepoint.Builder,
1092             com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder>
1093         getTimepointsFieldBuilder() {
1094       if (timepointsBuilder_ == null) {
1095         timepointsBuilder_ =
1096             new com.google.protobuf.RepeatedFieldBuilderV3<
1097                 com.google.cloud.texttospeech.v1beta1.Timepoint,
1098                 com.google.cloud.texttospeech.v1beta1.Timepoint.Builder,
1099                 com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder>(
1100                 timepoints_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean());
1101         timepoints_ = null;
1102       }
1103       return timepointsBuilder_;
1104     }
1105 
1106     private com.google.cloud.texttospeech.v1beta1.AudioConfig audioConfig_;
1107     private com.google.protobuf.SingleFieldBuilderV3<
1108             com.google.cloud.texttospeech.v1beta1.AudioConfig,
1109             com.google.cloud.texttospeech.v1beta1.AudioConfig.Builder,
1110             com.google.cloud.texttospeech.v1beta1.AudioConfigOrBuilder>
1111         audioConfigBuilder_;
1112     /**
1113      *
1114      *
1115      * <pre>
1116      * The audio metadata of `audio_content`.
1117      * </pre>
1118      *
1119      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1120      *
1121      * @return Whether the audioConfig field is set.
1122      */
1123     public boolean hasAudioConfig() {
1124       return ((bitField0_ & 0x00000004) != 0);
1125     }
1126     /**
1127      *
1128      *
1129      * <pre>
1130      * The audio metadata of `audio_content`.
1131      * </pre>
1132      *
1133      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1134      *
1135      * @return The audioConfig.
1136      */
1137     public com.google.cloud.texttospeech.v1beta1.AudioConfig getAudioConfig() {
1138       if (audioConfigBuilder_ == null) {
1139         return audioConfig_ == null
1140             ? com.google.cloud.texttospeech.v1beta1.AudioConfig.getDefaultInstance()
1141             : audioConfig_;
1142       } else {
1143         return audioConfigBuilder_.getMessage();
1144       }
1145     }
1146     /**
1147      *
1148      *
1149      * <pre>
1150      * The audio metadata of `audio_content`.
1151      * </pre>
1152      *
1153      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1154      */
1155     public Builder setAudioConfig(com.google.cloud.texttospeech.v1beta1.AudioConfig value) {
1156       if (audioConfigBuilder_ == null) {
1157         if (value == null) {
1158           throw new NullPointerException();
1159         }
1160         audioConfig_ = value;
1161       } else {
1162         audioConfigBuilder_.setMessage(value);
1163       }
1164       bitField0_ |= 0x00000004;
1165       onChanged();
1166       return this;
1167     }
1168     /**
1169      *
1170      *
1171      * <pre>
1172      * The audio metadata of `audio_content`.
1173      * </pre>
1174      *
1175      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1176      */
1177     public Builder setAudioConfig(
1178         com.google.cloud.texttospeech.v1beta1.AudioConfig.Builder builderForValue) {
1179       if (audioConfigBuilder_ == null) {
1180         audioConfig_ = builderForValue.build();
1181       } else {
1182         audioConfigBuilder_.setMessage(builderForValue.build());
1183       }
1184       bitField0_ |= 0x00000004;
1185       onChanged();
1186       return this;
1187     }
1188     /**
1189      *
1190      *
1191      * <pre>
1192      * The audio metadata of `audio_content`.
1193      * </pre>
1194      *
1195      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1196      */
1197     public Builder mergeAudioConfig(com.google.cloud.texttospeech.v1beta1.AudioConfig value) {
1198       if (audioConfigBuilder_ == null) {
1199         if (((bitField0_ & 0x00000004) != 0)
1200             && audioConfig_ != null
1201             && audioConfig_
1202                 != com.google.cloud.texttospeech.v1beta1.AudioConfig.getDefaultInstance()) {
1203           getAudioConfigBuilder().mergeFrom(value);
1204         } else {
1205           audioConfig_ = value;
1206         }
1207       } else {
1208         audioConfigBuilder_.mergeFrom(value);
1209       }
1210       bitField0_ |= 0x00000004;
1211       onChanged();
1212       return this;
1213     }
1214     /**
1215      *
1216      *
1217      * <pre>
1218      * The audio metadata of `audio_content`.
1219      * </pre>
1220      *
1221      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1222      */
1223     public Builder clearAudioConfig() {
1224       bitField0_ = (bitField0_ & ~0x00000004);
1225       audioConfig_ = null;
1226       if (audioConfigBuilder_ != null) {
1227         audioConfigBuilder_.dispose();
1228         audioConfigBuilder_ = null;
1229       }
1230       onChanged();
1231       return this;
1232     }
1233     /**
1234      *
1235      *
1236      * <pre>
1237      * The audio metadata of `audio_content`.
1238      * </pre>
1239      *
1240      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1241      */
1242     public com.google.cloud.texttospeech.v1beta1.AudioConfig.Builder getAudioConfigBuilder() {
1243       bitField0_ |= 0x00000004;
1244       onChanged();
1245       return getAudioConfigFieldBuilder().getBuilder();
1246     }
1247     /**
1248      *
1249      *
1250      * <pre>
1251      * The audio metadata of `audio_content`.
1252      * </pre>
1253      *
1254      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1255      */
1256     public com.google.cloud.texttospeech.v1beta1.AudioConfigOrBuilder getAudioConfigOrBuilder() {
1257       if (audioConfigBuilder_ != null) {
1258         return audioConfigBuilder_.getMessageOrBuilder();
1259       } else {
1260         return audioConfig_ == null
1261             ? com.google.cloud.texttospeech.v1beta1.AudioConfig.getDefaultInstance()
1262             : audioConfig_;
1263       }
1264     }
1265     /**
1266      *
1267      *
1268      * <pre>
1269      * The audio metadata of `audio_content`.
1270      * </pre>
1271      *
1272      * <code>.google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;</code>
1273      */
1274     private com.google.protobuf.SingleFieldBuilderV3<
1275             com.google.cloud.texttospeech.v1beta1.AudioConfig,
1276             com.google.cloud.texttospeech.v1beta1.AudioConfig.Builder,
1277             com.google.cloud.texttospeech.v1beta1.AudioConfigOrBuilder>
1278         getAudioConfigFieldBuilder() {
1279       if (audioConfigBuilder_ == null) {
1280         audioConfigBuilder_ =
1281             new com.google.protobuf.SingleFieldBuilderV3<
1282                 com.google.cloud.texttospeech.v1beta1.AudioConfig,
1283                 com.google.cloud.texttospeech.v1beta1.AudioConfig.Builder,
1284                 com.google.cloud.texttospeech.v1beta1.AudioConfigOrBuilder>(
1285                 getAudioConfig(), getParentForChildren(), isClean());
1286         audioConfig_ = null;
1287       }
1288       return audioConfigBuilder_;
1289     }
1290 
1291     @java.lang.Override
1292     public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
1293       return super.setUnknownFields(unknownFields);
1294     }
1295 
1296     @java.lang.Override
1297     public final Builder mergeUnknownFields(
1298         final com.google.protobuf.UnknownFieldSet unknownFields) {
1299       return super.mergeUnknownFields(unknownFields);
1300     }
1301 
1302     // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse)
1303   }
1304 
1305   // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse)
1306   private static final com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse
1307       DEFAULT_INSTANCE;
1308 
1309   static {
1310     DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse();
1311   }
1312 
1313   public static com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse
1314       getDefaultInstance() {
1315     return DEFAULT_INSTANCE;
1316   }
1317 
1318   private static final com.google.protobuf.Parser<SynthesizeSpeechResponse> PARSER =
1319       new com.google.protobuf.AbstractParser<SynthesizeSpeechResponse>() {
1320         @java.lang.Override
1321         public SynthesizeSpeechResponse parsePartialFrom(
1322             com.google.protobuf.CodedInputStream input,
1323             com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1324             throws com.google.protobuf.InvalidProtocolBufferException {
1325           Builder builder = newBuilder();
1326           try {
1327             builder.mergeFrom(input, extensionRegistry);
1328           } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1329             throw e.setUnfinishedMessage(builder.buildPartial());
1330           } catch (com.google.protobuf.UninitializedMessageException e) {
1331             throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
1332           } catch (java.io.IOException e) {
1333             throw new com.google.protobuf.InvalidProtocolBufferException(e)
1334                 .setUnfinishedMessage(builder.buildPartial());
1335           }
1336           return builder.buildPartial();
1337         }
1338       };
1339 
1340   public static com.google.protobuf.Parser<SynthesizeSpeechResponse> parser() {
1341     return PARSER;
1342   }
1343 
1344   @java.lang.Override
1345   public com.google.protobuf.Parser<SynthesizeSpeechResponse> getParserForType() {
1346     return PARSER;
1347   }
1348 
1349   @java.lang.Override
1350   public com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse
1351       getDefaultInstanceForType() {
1352     return DEFAULT_INSTANCE;
1353   }
1354 }
1355
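// ---------------------------------------------------------------------------
// Illustrative only, appended outside the generated class: a minimal sketch of
// constructing an instance by hand via the generated Builder, e.g. for a test
// fake. The literal values are placeholders, and the Timepoint setters are
// assumed from its mark_name / time_seconds fields.
//
//   com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse fake =
//       com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse.newBuilder()
//           .setAudioContent(com.google.protobuf.ByteString.copyFromUtf8("fake-audio"))
//           .addTimepoints(
//               com.google.cloud.texttospeech.v1beta1.Timepoint.newBuilder()
//                   .setMarkName("sentence-1")
//                   .setTimeSeconds(0.42)
//                   .build())
//           .build();
// ---------------------------------------------------------------------------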