1 /*
2  * Copyright 2020 Google LLC
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     https://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 // Generated by the protocol buffer compiler.  DO NOT EDIT!
17 // source: google/cloud/aiplatform/v1/training_pipeline.proto
18 
19 package com.google.cloud.aiplatform.v1;
20 
21 /**
22  *
23  *
24  * <pre>
25  * Specifies Vertex AI owned input data to be used for training, and
26  * possibly evaluating, the Model.
27  * </pre>
28  *
29  * Protobuf type {@code google.cloud.aiplatform.v1.InputDataConfig}
30  */
31 public final class InputDataConfig extends com.google.protobuf.GeneratedMessageV3
32     implements
33     // @@protoc_insertion_point(message_implements:google.cloud.aiplatform.v1.InputDataConfig)
34     InputDataConfigOrBuilder {
35   private static final long serialVersionUID = 0L;
36   // Use InputDataConfig.newBuilder() to construct.
37   private InputDataConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
38     super(builder);
39   }
40 
41   private InputDataConfig() {
42     datasetId_ = "";
43     annotationsFilter_ = "";
44     annotationSchemaUri_ = "";
45     savedQueryId_ = "";
46   }
47 
48   @java.lang.Override
49   @SuppressWarnings({"unused"})
50   protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
51     return new InputDataConfig();
52   }
53 
54   @java.lang.Override
55   public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
56     return this.unknownFields;
57   }
58 
59   public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
60     return com.google.cloud.aiplatform.v1.TrainingPipelineProto
61         .internal_static_google_cloud_aiplatform_v1_InputDataConfig_descriptor;
62   }
63 
64   @java.lang.Override
65   protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
66       internalGetFieldAccessorTable() {
67     return com.google.cloud.aiplatform.v1.TrainingPipelineProto
68         .internal_static_google_cloud_aiplatform_v1_InputDataConfig_fieldAccessorTable
69         .ensureFieldAccessorsInitialized(
70             com.google.cloud.aiplatform.v1.InputDataConfig.class,
71             com.google.cloud.aiplatform.v1.InputDataConfig.Builder.class);
72   }
73 
74   private int splitCase_ = 0;
75   private java.lang.Object split_;
76 
77   public enum SplitCase
78       implements
79           com.google.protobuf.Internal.EnumLite,
80           com.google.protobuf.AbstractMessage.InternalOneOfEnum {
81     FRACTION_SPLIT(2),
82     FILTER_SPLIT(3),
83     PREDEFINED_SPLIT(4),
84     TIMESTAMP_SPLIT(5),
85     STRATIFIED_SPLIT(12),
86     SPLIT_NOT_SET(0);
87     private final int value;
88 
89     private SplitCase(int value) {
90       this.value = value;
91     }
92     /**
93      * @param value The number of the enum to look for.
94      * @return The enum associated with the given number.
95      * @deprecated Use {@link #forNumber(int)} instead.
96      */
97     @java.lang.Deprecated
98     public static SplitCase valueOf(int value) {
99       return forNumber(value);
100     }
101 
102     public static SplitCase forNumber(int value) {
103       switch (value) {
104         case 2:
105           return FRACTION_SPLIT;
106         case 3:
107           return FILTER_SPLIT;
108         case 4:
109           return PREDEFINED_SPLIT;
110         case 5:
111           return TIMESTAMP_SPLIT;
112         case 12:
113           return STRATIFIED_SPLIT;
114         case 0:
115           return SPLIT_NOT_SET;
116         default:
117           return null;
118       }
119     }
120 
121     public int getNumber() {
122       return this.value;
123     }
124   };
125 
126   public SplitCase getSplitCase() {
127     return SplitCase.forNumber(splitCase_);
128   }
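  // Added note (not part of the generated source): a minimal, hedged sketch of how a
  // caller might branch on the split oneof using the accessors generated above.
  // `config` stands for a hypothetical, already-populated instance.
  //
  //   InputDataConfig config = ...;
  //   switch (config.getSplitCase()) {
  //     case FRACTION_SPLIT:
  //       System.out.println(config.getFractionSplit());
  //       break;
  //     case FILTER_SPLIT:
  //       System.out.println(config.getFilterSplit());
  //       break;
  //     case SPLIT_NOT_SET:
  //     default:
  //       break; // no split was configured on this message
  //   }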
129 
130   private int destinationCase_ = 0;
131   private java.lang.Object destination_;
132 
133   public enum DestinationCase
134       implements
135           com.google.protobuf.Internal.EnumLite,
136           com.google.protobuf.AbstractMessage.InternalOneOfEnum {
137     GCS_DESTINATION(8),
138     BIGQUERY_DESTINATION(10),
139     DESTINATION_NOT_SET(0);
140     private final int value;
141 
142     private DestinationCase(int value) {
143       this.value = value;
144     }
145     /**
146      * @param value The number of the enum to look for.
147      * @return The enum associated with the given number.
148      * @deprecated Use {@link #forNumber(int)} instead.
149      */
150     @java.lang.Deprecated
151     public static DestinationCase valueOf(int value) {
152       return forNumber(value);
153     }
154 
155     public static DestinationCase forNumber(int value) {
156       switch (value) {
157         case 8:
158           return GCS_DESTINATION;
159         case 10:
160           return BIGQUERY_DESTINATION;
161         case 0:
162           return DESTINATION_NOT_SET;
163         default:
164           return null;
165       }
166     }
167 
168     public int getNumber() {
169       return this.value;
170     }
171   };
172 
173   public DestinationCase getDestinationCase() {
174     return DestinationCase.forNumber(destinationCase_);
175   }
176 
177   public static final int FRACTION_SPLIT_FIELD_NUMBER = 2;
178   /**
179    *
180    *
181    * <pre>
182    * Split based on fractions defining the size of each set.
183    * </pre>
184    *
185    * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
186    *
187    * @return Whether the fractionSplit field is set.
188    */
189   @java.lang.Override
190   public boolean hasFractionSplit() {
191     return splitCase_ == 2;
192   }
193   /**
194    *
195    *
196    * <pre>
197    * Split based on fractions defining the size of each set.
198    * </pre>
199    *
200    * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
201    *
202    * @return The fractionSplit.
203    */
204   @java.lang.Override
205   public com.google.cloud.aiplatform.v1.FractionSplit getFractionSplit() {
206     if (splitCase_ == 2) {
207       return (com.google.cloud.aiplatform.v1.FractionSplit) split_;
208     }
209     return com.google.cloud.aiplatform.v1.FractionSplit.getDefaultInstance();
210   }
211   /**
212    *
213    *
214    * <pre>
215    * Split based on fractions defining the size of each set.
216    * </pre>
217    *
218    * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
219    */
220   @java.lang.Override
221   public com.google.cloud.aiplatform.v1.FractionSplitOrBuilder getFractionSplitOrBuilder() {
222     if (splitCase_ == 2) {
223       return (com.google.cloud.aiplatform.v1.FractionSplit) split_;
224     }
225     return com.google.cloud.aiplatform.v1.FractionSplit.getDefaultInstance();
226   }
227 
228   public static final int FILTER_SPLIT_FIELD_NUMBER = 3;
229   /**
230    *
231    *
232    * <pre>
233    * Split based on the provided filters for each set.
234    * </pre>
235    *
236    * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
237    *
238    * @return Whether the filterSplit field is set.
239    */
240   @java.lang.Override
241   public boolean hasFilterSplit() {
242     return splitCase_ == 3;
243   }
244   /**
245    *
246    *
247    * <pre>
248    * Split based on the provided filters for each set.
249    * </pre>
250    *
251    * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
252    *
253    * @return The filterSplit.
254    */
255   @java.lang.Override
256   public com.google.cloud.aiplatform.v1.FilterSplit getFilterSplit() {
257     if (splitCase_ == 3) {
258       return (com.google.cloud.aiplatform.v1.FilterSplit) split_;
259     }
260     return com.google.cloud.aiplatform.v1.FilterSplit.getDefaultInstance();
261   }
262   /**
263    *
264    *
265    * <pre>
266    * Split based on the provided filters for each set.
267    * </pre>
268    *
269    * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
270    */
271   @java.lang.Override
272   public com.google.cloud.aiplatform.v1.FilterSplitOrBuilder getFilterSplitOrBuilder() {
273     if (splitCase_ == 3) {
274       return (com.google.cloud.aiplatform.v1.FilterSplit) split_;
275     }
276     return com.google.cloud.aiplatform.v1.FilterSplit.getDefaultInstance();
277   }
278 
279   public static final int PREDEFINED_SPLIT_FIELD_NUMBER = 4;
280   /**
281    *
282    *
283    * <pre>
284    * Supported only for tabular Datasets.
285    * Split based on a predefined key.
286    * </pre>
287    *
288    * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
289    *
290    * @return Whether the predefinedSplit field is set.
291    */
292   @java.lang.Override
293   public boolean hasPredefinedSplit() {
294     return splitCase_ == 4;
295   }
296   /**
297    *
298    *
299    * <pre>
300    * Supported only for tabular Datasets.
301    * Split based on a predefined key.
302    * </pre>
303    *
304    * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
305    *
306    * @return The predefinedSplit.
307    */
308   @java.lang.Override
309   public com.google.cloud.aiplatform.v1.PredefinedSplit getPredefinedSplit() {
310     if (splitCase_ == 4) {
311       return (com.google.cloud.aiplatform.v1.PredefinedSplit) split_;
312     }
313     return com.google.cloud.aiplatform.v1.PredefinedSplit.getDefaultInstance();
314   }
315   /**
316    *
317    *
318    * <pre>
319    * Supported only for tabular Datasets.
320    * Split based on a predefined key.
321    * </pre>
322    *
323    * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
324    */
325   @java.lang.Override
326   public com.google.cloud.aiplatform.v1.PredefinedSplitOrBuilder getPredefinedSplitOrBuilder() {
327     if (splitCase_ == 4) {
328       return (com.google.cloud.aiplatform.v1.PredefinedSplit) split_;
329     }
330     return com.google.cloud.aiplatform.v1.PredefinedSplit.getDefaultInstance();
331   }
332 
333   public static final int TIMESTAMP_SPLIT_FIELD_NUMBER = 5;
334   /**
335    *
336    *
337    * <pre>
338    * Supported only for tabular Datasets.
339    * Split based on the timestamp of the input data pieces.
340    * </pre>
341    *
342    * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
343    *
344    * @return Whether the timestampSplit field is set.
345    */
346   @java.lang.Override
347   public boolean hasTimestampSplit() {
348     return splitCase_ == 5;
349   }
350   /**
351    *
352    *
353    * <pre>
354    * Supported only for tabular Datasets.
355    * Split based on the timestamp of the input data pieces.
356    * </pre>
357    *
358    * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
359    *
360    * @return The timestampSplit.
361    */
362   @java.lang.Override
363   public com.google.cloud.aiplatform.v1.TimestampSplit getTimestampSplit() {
364     if (splitCase_ == 5) {
365       return (com.google.cloud.aiplatform.v1.TimestampSplit) split_;
366     }
367     return com.google.cloud.aiplatform.v1.TimestampSplit.getDefaultInstance();
368   }
369   /**
370    *
371    *
372    * <pre>
373    * Supported only for tabular Datasets.
374    * Split based on the timestamp of the input data pieces.
375    * </pre>
376    *
377    * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
378    */
379   @java.lang.Override
380   public com.google.cloud.aiplatform.v1.TimestampSplitOrBuilder getTimestampSplitOrBuilder() {
381     if (splitCase_ == 5) {
382       return (com.google.cloud.aiplatform.v1.TimestampSplit) split_;
383     }
384     return com.google.cloud.aiplatform.v1.TimestampSplit.getDefaultInstance();
385   }
386 
387   public static final int STRATIFIED_SPLIT_FIELD_NUMBER = 12;
388   /**
389    *
390    *
391    * <pre>
392    * Supported only for tabular Datasets.
393    * Split based on the distribution of the specified column.
394    * </pre>
395    *
396    * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
397    *
398    * @return Whether the stratifiedSplit field is set.
399    */
400   @java.lang.Override
401   public boolean hasStratifiedSplit() {
402     return splitCase_ == 12;
403   }
404   /**
405    *
406    *
407    * <pre>
408    * Supported only for tabular Datasets.
409    * Split based on the distribution of the specified column.
410    * </pre>
411    *
412    * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
413    *
414    * @return The stratifiedSplit.
415    */
416   @java.lang.Override
417   public com.google.cloud.aiplatform.v1.StratifiedSplit getStratifiedSplit() {
418     if (splitCase_ == 12) {
419       return (com.google.cloud.aiplatform.v1.StratifiedSplit) split_;
420     }
421     return com.google.cloud.aiplatform.v1.StratifiedSplit.getDefaultInstance();
422   }
423   /**
424    *
425    *
426    * <pre>
427    * Supported only for tabular Datasets.
428    * Split based on the distribution of the specified column.
429    * </pre>
430    *
431    * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
432    */
433   @java.lang.Override
434   public com.google.cloud.aiplatform.v1.StratifiedSplitOrBuilder getStratifiedSplitOrBuilder() {
435     if (splitCase_ == 12) {
436       return (com.google.cloud.aiplatform.v1.StratifiedSplit) split_;
437     }
438     return com.google.cloud.aiplatform.v1.StratifiedSplit.getDefaultInstance();
439   }
440 
441   public static final int GCS_DESTINATION_FIELD_NUMBER = 8;
442   /**
443    *
444    *
445    * <pre>
446    * The Cloud Storage location where the training data is to be
447    * written to. In the given directory a new directory is created with
448    * name:
449    * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
450    * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
451    * All training input data is written into that directory.
452    * The Vertex AI environment variables representing Cloud Storage
453    * data URIs are represented in the Cloud Storage wildcard
454    * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
455    * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
456    * * AIP_TRAINING_DATA_URI =
457    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
458    * * AIP_VALIDATION_DATA_URI =
459    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
460    * * AIP_TEST_DATA_URI =
461    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
462    * </pre>
463    *
464    * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
465    *
466    * @return Whether the gcsDestination field is set.
467    */
468   @java.lang.Override
469   public boolean hasGcsDestination() {
470     return destinationCase_ == 8;
471   }
472   /**
473    *
474    *
475    * <pre>
476    * The Cloud Storage location where the training data is to be
477    * written to. In the given directory a new directory is created with
478    * name:
479    * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
480    * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
481    * All training input data is written into that directory.
482    * The Vertex AI environment variables representing Cloud Storage
483    * data URIs are represented in the Cloud Storage wildcard
484    * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
485    * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
486    * * AIP_TRAINING_DATA_URI =
487    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
488    * * AIP_VALIDATION_DATA_URI =
489    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
490    * * AIP_TEST_DATA_URI =
491    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
492    * </pre>
493    *
494    * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
495    *
496    * @return The gcsDestination.
497    */
498   @java.lang.Override
499   public com.google.cloud.aiplatform.v1.GcsDestination getGcsDestination() {
500     if (destinationCase_ == 8) {
501       return (com.google.cloud.aiplatform.v1.GcsDestination) destination_;
502     }
503     return com.google.cloud.aiplatform.v1.GcsDestination.getDefaultInstance();
504   }
505   /**
506    *
507    *
508    * <pre>
509    * The Cloud Storage location where the training data is to be
510    * written to. In the given directory a new directory is created with
511    * name:
512    * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
513    * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
514    * All training input data is written into that directory.
515    * The Vertex AI environment variables representing Cloud Storage
516    * data URIs are represented in the Cloud Storage wildcard
517    * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
518    * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
519    * * AIP_TRAINING_DATA_URI =
520    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
521    * * AIP_VALIDATION_DATA_URI =
522    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
523    * * AIP_TEST_DATA_URI =
524    * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
525    * </pre>
526    *
527    * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
528    */
529   @java.lang.Override
530   public com.google.cloud.aiplatform.v1.GcsDestinationOrBuilder getGcsDestinationOrBuilder() {
531     if (destinationCase_ == 8) {
532       return (com.google.cloud.aiplatform.v1.GcsDestination) destination_;
533     }
534     return com.google.cloud.aiplatform.v1.GcsDestination.getDefaultInstance();
535   }
536 
537   public static final int BIGQUERY_DESTINATION_FIELD_NUMBER = 10;
538   /**
539    *
540    *
541    * <pre>
542    * Only applicable to custom training with tabular Dataset with BigQuery
543    * source.
544    * The BigQuery project location where the training data is to be written
545    * to. In the given project a new dataset is created with name
546    * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
547    * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
548    * input data is written into that dataset. In the dataset three
549    * tables are created, `training`, `validation` and `test`.
550    * * AIP_DATA_FORMAT = "bigquery".
551    * * AIP_TRAINING_DATA_URI  =
552    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
553    * * AIP_VALIDATION_DATA_URI =
554    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
555    * * AIP_TEST_DATA_URI =
556    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
557    * </pre>
558    *
559    * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
560    *
561    * @return Whether the bigqueryDestination field is set.
562    */
563   @java.lang.Override
564   public boolean hasBigqueryDestination() {
565     return destinationCase_ == 10;
566   }
567   /**
568    *
569    *
570    * <pre>
571    * Only applicable to custom training with tabular Dataset with BigQuery
572    * source.
573    * The BigQuery project location where the training data is to be written
574    * to. In the given project a new dataset is created with name
575    * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
576    * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
577    * input data is written into that dataset. In the dataset three
578    * tables are created, `training`, `validation` and `test`.
579    * * AIP_DATA_FORMAT = "bigquery".
580    * * AIP_TRAINING_DATA_URI  =
581    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
582    * * AIP_VALIDATION_DATA_URI =
583    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
584    * * AIP_TEST_DATA_URI =
585    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
586    * </pre>
587    *
588    * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
589    *
590    * @return The bigqueryDestination.
591    */
592   @java.lang.Override
593   public com.google.cloud.aiplatform.v1.BigQueryDestination getBigqueryDestination() {
594     if (destinationCase_ == 10) {
595       return (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_;
596     }
597     return com.google.cloud.aiplatform.v1.BigQueryDestination.getDefaultInstance();
598   }
599   /**
600    *
601    *
602    * <pre>
603    * Only applicable to custom training with tabular Dataset with BigQuery
604    * source.
605    * The BigQuery project location where the training data is to be written
606    * to. In the given project a new dataset is created with name
607    * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
608    * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
609    * input data is written into that dataset. In the dataset three
610    * tables are created, `training`, `validation` and `test`.
611    * * AIP_DATA_FORMAT = "bigquery".
612    * * AIP_TRAINING_DATA_URI  =
613    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
614    * * AIP_VALIDATION_DATA_URI =
615    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
616    * * AIP_TEST_DATA_URI =
617    * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
618    * </pre>
619    *
620    * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
621    */
622   @java.lang.Override
623   public com.google.cloud.aiplatform.v1.BigQueryDestinationOrBuilder
624       getBigqueryDestinationOrBuilder() {
625     if (destinationCase_ == 10) {
626       return (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_;
627     }
628     return com.google.cloud.aiplatform.v1.BigQueryDestination.getDefaultInstance();
629   }
630 
631   public static final int DATASET_ID_FIELD_NUMBER = 1;
632 
633   @SuppressWarnings("serial")
634   private volatile java.lang.Object datasetId_ = "";
635   /**
636    *
637    *
638    * <pre>
639    * Required. The ID of the Dataset in the same Project and Location which data
640    * will be used to train the Model. The Dataset must use schema compatible
641    * with Model being trained, and what is compatible should be described in the
642    * used TrainingPipeline's [training_task_definition]
643    * [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
644    * For tabular Datasets, all their data is exported to training, to pick
645    * and choose from.
646    * </pre>
647    *
648    * <code>string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];</code>
649    *
650    * @return The datasetId.
651    */
652   @java.lang.Override
653   public java.lang.String getDatasetId() {
654     java.lang.Object ref = datasetId_;
655     if (ref instanceof java.lang.String) {
656       return (java.lang.String) ref;
657     } else {
658       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
659       java.lang.String s = bs.toStringUtf8();
660       datasetId_ = s;
661       return s;
662     }
663   }
664   /**
665    *
666    *
667    * <pre>
668    * Required. The ID of the Dataset in the same Project and Location which data
669    * will be used to train the Model. The Dataset must use schema compatible
670    * with Model being trained, and what is compatible should be described in the
671    * used TrainingPipeline's [training_task_definition]
672    * [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
673    * For tabular Datasets, all their data is exported to training, to pick
674    * and choose from.
675    * </pre>
676    *
677    * <code>string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];</code>
678    *
679    * @return The bytes for datasetId.
680    */
681   @java.lang.Override
682   public com.google.protobuf.ByteString getDatasetIdBytes() {
683     java.lang.Object ref = datasetId_;
684     if (ref instanceof java.lang.String) {
685       com.google.protobuf.ByteString b =
686           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
687       datasetId_ = b;
688       return b;
689     } else {
690       return (com.google.protobuf.ByteString) ref;
691     }
692   }
693 
694   public static final int ANNOTATIONS_FILTER_FIELD_NUMBER = 6;
695 
696   @SuppressWarnings("serial")
697   private volatile java.lang.Object annotationsFilter_ = "";
698   /**
699    *
700    *
701    * <pre>
702    * Applicable only to Datasets that have DataItems and Annotations.
703    * A filter on Annotations of the Dataset. Only Annotations that both
704    * match this filter and belong to DataItems not ignored by the split method
705    * are used in respectively training, validation or test role, depending on
706    * the role of the DataItem they are on (for the auto-assigned that role is
707    * decided by Vertex AI). A filter with same syntax as the one used in
708    * [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
709    * may be used, but note here it filters across all Annotations of the
710    * Dataset, and not just within a single DataItem.
711    * </pre>
712    *
713    * <code>string annotations_filter = 6;</code>
714    *
715    * @return The annotationsFilter.
716    */
717   @java.lang.Override
718   public java.lang.String getAnnotationsFilter() {
719     java.lang.Object ref = annotationsFilter_;
720     if (ref instanceof java.lang.String) {
721       return (java.lang.String) ref;
722     } else {
723       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
724       java.lang.String s = bs.toStringUtf8();
725       annotationsFilter_ = s;
726       return s;
727     }
728   }
729   /**
730    *
731    *
732    * <pre>
733    * Applicable only to Datasets that have DataItems and Annotations.
734    * A filter on Annotations of the Dataset. Only Annotations that both
735    * match this filter and belong to DataItems not ignored by the split method
736    * are used in respectively training, validation or test role, depending on
737    * the role of the DataItem they are on (for the auto-assigned that role is
738    * decided by Vertex AI). A filter with same syntax as the one used in
739    * [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
740    * may be used, but note here it filters across all Annotations of the
741    * Dataset, and not just within a single DataItem.
742    * </pre>
743    *
744    * <code>string annotations_filter = 6;</code>
745    *
746    * @return The bytes for annotationsFilter.
747    */
748   @java.lang.Override
749   public com.google.protobuf.ByteString getAnnotationsFilterBytes() {
750     java.lang.Object ref = annotationsFilter_;
751     if (ref instanceof java.lang.String) {
752       com.google.protobuf.ByteString b =
753           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
754       annotationsFilter_ = b;
755       return b;
756     } else {
757       return (com.google.protobuf.ByteString) ref;
758     }
759   }
760 
761   public static final int ANNOTATION_SCHEMA_URI_FIELD_NUMBER = 9;
762 
763   @SuppressWarnings("serial")
764   private volatile java.lang.Object annotationSchemaUri_ = "";
765   /**
766    *
767    *
768    * <pre>
769    * Applicable only to custom training with Datasets that have DataItems and
770    * Annotations.
771    * Cloud Storage URI that points to a YAML file describing the annotation
772    * schema. The schema is defined as an OpenAPI 3.0.2 [Schema
773    * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
774    * The schema files that can be used here are found in
775    * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
776    * chosen schema must be consistent with
777    * [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
778    * Dataset specified by
779    * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
780    * Only Annotations that both match this schema and belong to DataItems not
781    * ignored by the split method are used in respectively training, validation
782    * or test role, depending on the role of the DataItem they are on.
783    * When used in conjunction with
784    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
785    * the Annotations used for training are filtered by both
786    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
787    * and
788    * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
789    * </pre>
790    *
791    * <code>string annotation_schema_uri = 9;</code>
792    *
793    * @return The annotationSchemaUri.
794    */
795   @java.lang.Override
796   public java.lang.String getAnnotationSchemaUri() {
797     java.lang.Object ref = annotationSchemaUri_;
798     if (ref instanceof java.lang.String) {
799       return (java.lang.String) ref;
800     } else {
801       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
802       java.lang.String s = bs.toStringUtf8();
803       annotationSchemaUri_ = s;
804       return s;
805     }
806   }
807   /**
808    *
809    *
810    * <pre>
811    * Applicable only to custom training with Datasets that have DataItems and
812    * Annotations.
813    * Cloud Storage URI that points to a YAML file describing the annotation
814    * schema. The schema is defined as an OpenAPI 3.0.2 [Schema
815    * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
816    * The schema files that can be used here are found in
817    * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
818    * chosen schema must be consistent with
819    * [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
820    * Dataset specified by
821    * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
822    * Only Annotations that both match this schema and belong to DataItems not
823    * ignored by the split method are used in respectively training, validation
824    * or test role, depending on the role of the DataItem they are on.
825    * When used in conjunction with
826    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
827    * the Annotations used for training are filtered by both
828    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
829    * and
830    * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
831    * </pre>
832    *
833    * <code>string annotation_schema_uri = 9;</code>
834    *
835    * @return The bytes for annotationSchemaUri.
836    */
837   @java.lang.Override
838   public com.google.protobuf.ByteString getAnnotationSchemaUriBytes() {
839     java.lang.Object ref = annotationSchemaUri_;
840     if (ref instanceof java.lang.String) {
841       com.google.protobuf.ByteString b =
842           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
843       annotationSchemaUri_ = b;
844       return b;
845     } else {
846       return (com.google.protobuf.ByteString) ref;
847     }
848   }
849 
850   public static final int SAVED_QUERY_ID_FIELD_NUMBER = 7;
851 
852   @SuppressWarnings("serial")
853   private volatile java.lang.Object savedQueryId_ = "";
854   /**
855    *
856    *
857    * <pre>
858    * Only applicable to Datasets that have SavedQueries.
859    * The ID of a SavedQuery (annotation set) under the Dataset specified by
860    * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
861    * for filtering Annotations for training.
862    * Only Annotations that are associated with this SavedQuery are used in
863    * respectively training. When used in conjunction with
864    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
865    * the Annotations used for training are filtered by both
866    * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
867    * and
868    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
869    * Only one of
870    * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
871    * and
872    * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
873    * should be specified as both of them represent the same thing: problem type.
874    * </pre>
875    *
876    * <code>string saved_query_id = 7;</code>
877    *
878    * @return The savedQueryId.
879    */
880   @java.lang.Override
881   public java.lang.String getSavedQueryId() {
882     java.lang.Object ref = savedQueryId_;
883     if (ref instanceof java.lang.String) {
884       return (java.lang.String) ref;
885     } else {
886       com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
887       java.lang.String s = bs.toStringUtf8();
888       savedQueryId_ = s;
889       return s;
890     }
891   }
892   /**
893    *
894    *
895    * <pre>
896    * Only applicable to Datasets that have SavedQueries.
897    * The ID of a SavedQuery (annotation set) under the Dataset specified by
898    * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
899    * for filtering Annotations for training.
900    * Only Annotations that are associated with this SavedQuery are used in
901    * respectively training. When used in conjunction with
902    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
903    * the Annotations used for training are filtered by both
904    * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
905    * and
906    * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
907    * Only one of
908    * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
909    * and
910    * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
911    * should be specified as both of them represent the same thing: problem type.
912    * </pre>
913    *
914    * <code>string saved_query_id = 7;</code>
915    *
916    * @return The bytes for savedQueryId.
917    */
918   @java.lang.Override
919   public com.google.protobuf.ByteString getSavedQueryIdBytes() {
920     java.lang.Object ref = savedQueryId_;
921     if (ref instanceof java.lang.String) {
922       com.google.protobuf.ByteString b =
923           com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
924       savedQueryId_ = b;
925       return b;
926     } else {
927       return (com.google.protobuf.ByteString) ref;
928     }
929   }
930 
931   public static final int PERSIST_ML_USE_ASSIGNMENT_FIELD_NUMBER = 11;
932   private boolean persistMlUseAssignment_ = false;
933   /**
934    *
935    *
936    * <pre>
937    * Whether to persist the ML use assignment to data item system labels.
938    * </pre>
939    *
940    * <code>bool persist_ml_use_assignment = 11;</code>
941    *
942    * @return The persistMlUseAssignment.
943    */
944   @java.lang.Override
945   public boolean getPersistMlUseAssignment() {
946     return persistMlUseAssignment_;
947   }
948 
949   private byte memoizedIsInitialized = -1;
950 
951   @java.lang.Override
952   public final boolean isInitialized() {
953     byte isInitialized = memoizedIsInitialized;
954     if (isInitialized == 1) return true;
955     if (isInitialized == 0) return false;
956 
957     memoizedIsInitialized = 1;
958     return true;
959   }
960 
961   @java.lang.Override
962   public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
963     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(datasetId_)) {
964       com.google.protobuf.GeneratedMessageV3.writeString(output, 1, datasetId_);
965     }
966     if (splitCase_ == 2) {
967       output.writeMessage(2, (com.google.cloud.aiplatform.v1.FractionSplit) split_);
968     }
969     if (splitCase_ == 3) {
970       output.writeMessage(3, (com.google.cloud.aiplatform.v1.FilterSplit) split_);
971     }
972     if (splitCase_ == 4) {
973       output.writeMessage(4, (com.google.cloud.aiplatform.v1.PredefinedSplit) split_);
974     }
975     if (splitCase_ == 5) {
976       output.writeMessage(5, (com.google.cloud.aiplatform.v1.TimestampSplit) split_);
977     }
978     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(annotationsFilter_)) {
979       com.google.protobuf.GeneratedMessageV3.writeString(output, 6, annotationsFilter_);
980     }
981     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(savedQueryId_)) {
982       com.google.protobuf.GeneratedMessageV3.writeString(output, 7, savedQueryId_);
983     }
984     if (destinationCase_ == 8) {
985       output.writeMessage(8, (com.google.cloud.aiplatform.v1.GcsDestination) destination_);
986     }
987     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(annotationSchemaUri_)) {
988       com.google.protobuf.GeneratedMessageV3.writeString(output, 9, annotationSchemaUri_);
989     }
990     if (destinationCase_ == 10) {
991       output.writeMessage(10, (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_);
992     }
993     if (persistMlUseAssignment_ != false) {
994       output.writeBool(11, persistMlUseAssignment_);
995     }
996     if (splitCase_ == 12) {
997       output.writeMessage(12, (com.google.cloud.aiplatform.v1.StratifiedSplit) split_);
998     }
999     getUnknownFields().writeTo(output);
1000   }
1001 
1002   @java.lang.Override
1003   public int getSerializedSize() {
1004     int size = memoizedSize;
1005     if (size != -1) return size;
1006 
1007     size = 0;
1008     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(datasetId_)) {
1009       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, datasetId_);
1010     }
1011     if (splitCase_ == 2) {
1012       size +=
1013           com.google.protobuf.CodedOutputStream.computeMessageSize(
1014               2, (com.google.cloud.aiplatform.v1.FractionSplit) split_);
1015     }
1016     if (splitCase_ == 3) {
1017       size +=
1018           com.google.protobuf.CodedOutputStream.computeMessageSize(
1019               3, (com.google.cloud.aiplatform.v1.FilterSplit) split_);
1020     }
1021     if (splitCase_ == 4) {
1022       size +=
1023           com.google.protobuf.CodedOutputStream.computeMessageSize(
1024               4, (com.google.cloud.aiplatform.v1.PredefinedSplit) split_);
1025     }
1026     if (splitCase_ == 5) {
1027       size +=
1028           com.google.protobuf.CodedOutputStream.computeMessageSize(
1029               5, (com.google.cloud.aiplatform.v1.TimestampSplit) split_);
1030     }
1031     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(annotationsFilter_)) {
1032       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, annotationsFilter_);
1033     }
1034     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(savedQueryId_)) {
1035       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, savedQueryId_);
1036     }
1037     if (destinationCase_ == 8) {
1038       size +=
1039           com.google.protobuf.CodedOutputStream.computeMessageSize(
1040               8, (com.google.cloud.aiplatform.v1.GcsDestination) destination_);
1041     }
1042     if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(annotationSchemaUri_)) {
1043       size += com.google.protobuf.GeneratedMessageV3.computeStringSize(9, annotationSchemaUri_);
1044     }
1045     if (destinationCase_ == 10) {
1046       size +=
1047           com.google.protobuf.CodedOutputStream.computeMessageSize(
1048               10, (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_);
1049     }
1050     if (persistMlUseAssignment_ != false) {
1051       size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, persistMlUseAssignment_);
1052     }
1053     if (splitCase_ == 12) {
1054       size +=
1055           com.google.protobuf.CodedOutputStream.computeMessageSize(
1056               12, (com.google.cloud.aiplatform.v1.StratifiedSplit) split_);
1057     }
1058     size += getUnknownFields().getSerializedSize();
1059     memoizedSize = size;
1060     return size;
1061   }
1062 
1063   @java.lang.Override
1064   public boolean equals(final java.lang.Object obj) {
1065     if (obj == this) {
1066       return true;
1067     }
1068     if (!(obj instanceof com.google.cloud.aiplatform.v1.InputDataConfig)) {
1069       return super.equals(obj);
1070     }
1071     com.google.cloud.aiplatform.v1.InputDataConfig other =
1072         (com.google.cloud.aiplatform.v1.InputDataConfig) obj;
1073 
1074     if (!getDatasetId().equals(other.getDatasetId())) return false;
1075     if (!getAnnotationsFilter().equals(other.getAnnotationsFilter())) return false;
1076     if (!getAnnotationSchemaUri().equals(other.getAnnotationSchemaUri())) return false;
1077     if (!getSavedQueryId().equals(other.getSavedQueryId())) return false;
1078     if (getPersistMlUseAssignment() != other.getPersistMlUseAssignment()) return false;
1079     if (!getSplitCase().equals(other.getSplitCase())) return false;
1080     switch (splitCase_) {
1081       case 2:
1082         if (!getFractionSplit().equals(other.getFractionSplit())) return false;
1083         break;
1084       case 3:
1085         if (!getFilterSplit().equals(other.getFilterSplit())) return false;
1086         break;
1087       case 4:
1088         if (!getPredefinedSplit().equals(other.getPredefinedSplit())) return false;
1089         break;
1090       case 5:
1091         if (!getTimestampSplit().equals(other.getTimestampSplit())) return false;
1092         break;
1093       case 12:
1094         if (!getStratifiedSplit().equals(other.getStratifiedSplit())) return false;
1095         break;
1096       case 0:
1097       default:
1098     }
1099     if (!getDestinationCase().equals(other.getDestinationCase())) return false;
1100     switch (destinationCase_) {
1101       case 8:
1102         if (!getGcsDestination().equals(other.getGcsDestination())) return false;
1103         break;
1104       case 10:
1105         if (!getBigqueryDestination().equals(other.getBigqueryDestination())) return false;
1106         break;
1107       case 0:
1108       default:
1109     }
1110     if (!getUnknownFields().equals(other.getUnknownFields())) return false;
1111     return true;
1112   }
1113 
1114   @java.lang.Override
1115   public int hashCode() {
1116     if (memoizedHashCode != 0) {
1117       return memoizedHashCode;
1118     }
1119     int hash = 41;
1120     hash = (19 * hash) + getDescriptor().hashCode();
1121     hash = (37 * hash) + DATASET_ID_FIELD_NUMBER;
1122     hash = (53 * hash) + getDatasetId().hashCode();
1123     hash = (37 * hash) + ANNOTATIONS_FILTER_FIELD_NUMBER;
1124     hash = (53 * hash) + getAnnotationsFilter().hashCode();
1125     hash = (37 * hash) + ANNOTATION_SCHEMA_URI_FIELD_NUMBER;
1126     hash = (53 * hash) + getAnnotationSchemaUri().hashCode();
1127     hash = (37 * hash) + SAVED_QUERY_ID_FIELD_NUMBER;
1128     hash = (53 * hash) + getSavedQueryId().hashCode();
1129     hash = (37 * hash) + PERSIST_ML_USE_ASSIGNMENT_FIELD_NUMBER;
1130     hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getPersistMlUseAssignment());
1131     switch (splitCase_) {
1132       case 2:
1133         hash = (37 * hash) + FRACTION_SPLIT_FIELD_NUMBER;
1134         hash = (53 * hash) + getFractionSplit().hashCode();
1135         break;
1136       case 3:
1137         hash = (37 * hash) + FILTER_SPLIT_FIELD_NUMBER;
1138         hash = (53 * hash) + getFilterSplit().hashCode();
1139         break;
1140       case 4:
1141         hash = (37 * hash) + PREDEFINED_SPLIT_FIELD_NUMBER;
1142         hash = (53 * hash) + getPredefinedSplit().hashCode();
1143         break;
1144       case 5:
1145         hash = (37 * hash) + TIMESTAMP_SPLIT_FIELD_NUMBER;
1146         hash = (53 * hash) + getTimestampSplit().hashCode();
1147         break;
1148       case 12:
1149         hash = (37 * hash) + STRATIFIED_SPLIT_FIELD_NUMBER;
1150         hash = (53 * hash) + getStratifiedSplit().hashCode();
1151         break;
1152       case 0:
1153       default:
1154     }
1155     switch (destinationCase_) {
1156       case 8:
1157         hash = (37 * hash) + GCS_DESTINATION_FIELD_NUMBER;
1158         hash = (53 * hash) + getGcsDestination().hashCode();
1159         break;
1160       case 10:
1161         hash = (37 * hash) + BIGQUERY_DESTINATION_FIELD_NUMBER;
1162         hash = (53 * hash) + getBigqueryDestination().hashCode();
1163         break;
1164       case 0:
1165       default:
1166     }
1167     hash = (29 * hash) + getUnknownFields().hashCode();
1168     memoizedHashCode = hash;
1169     return hash;
1170   }
1171 
1172   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(java.nio.ByteBuffer data)
1173       throws com.google.protobuf.InvalidProtocolBufferException {
1174     return PARSER.parseFrom(data);
1175   }
1176 
1177   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(
1178       java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1179       throws com.google.protobuf.InvalidProtocolBufferException {
1180     return PARSER.parseFrom(data, extensionRegistry);
1181   }
1182 
1183   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(
1184       com.google.protobuf.ByteString data)
1185       throws com.google.protobuf.InvalidProtocolBufferException {
1186     return PARSER.parseFrom(data);
1187   }
1188 
1189   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(
1190       com.google.protobuf.ByteString data,
1191       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1192       throws com.google.protobuf.InvalidProtocolBufferException {
1193     return PARSER.parseFrom(data, extensionRegistry);
1194   }
1195 
1196   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(byte[] data)
1197       throws com.google.protobuf.InvalidProtocolBufferException {
1198     return PARSER.parseFrom(data);
1199   }
1200 
1201   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(
1202       byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1203       throws com.google.protobuf.InvalidProtocolBufferException {
1204     return PARSER.parseFrom(data, extensionRegistry);
1205   }
1206 
1207   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(java.io.InputStream input)
1208       throws java.io.IOException {
1209     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
1210   }
1211 
1212   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(
1213       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1214       throws java.io.IOException {
1215     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
1216         PARSER, input, extensionRegistry);
1217   }
1218 
1219   public static com.google.cloud.aiplatform.v1.InputDataConfig parseDelimitedFrom(
1220       java.io.InputStream input) throws java.io.IOException {
1221     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
1222   }
1223 
1224   public static com.google.cloud.aiplatform.v1.InputDataConfig parseDelimitedFrom(
1225       java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1226       throws java.io.IOException {
1227     return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
1228         PARSER, input, extensionRegistry);
1229   }
1230 
1231   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(
1232       com.google.protobuf.CodedInputStream input) throws java.io.IOException {
1233     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
1234   }
1235 
1236   public static com.google.cloud.aiplatform.v1.InputDataConfig parseFrom(
1237       com.google.protobuf.CodedInputStream input,
1238       com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1239       throws java.io.IOException {
1240     return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
1241         PARSER, input, extensionRegistry);
1242   }
1243 
1244   @java.lang.Override
1245   public Builder newBuilderForType() {
1246     return newBuilder();
1247   }
1248 
1249   public static Builder newBuilder() {
1250     return DEFAULT_INSTANCE.toBuilder();
1251   }
1252 
1253   public static Builder newBuilder(com.google.cloud.aiplatform.v1.InputDataConfig prototype) {
1254     return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
1255   }
1256 
1257   @java.lang.Override
1258   public Builder toBuilder() {
1259     return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
1260   }
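  // Added note (not part of the generated source): a minimal, hedged sketch of assembling
  // an InputDataConfig through the builder pattern shown in this class. setDatasetId and
  // setFractionSplit are generated for this message; the FractionSplit fraction setters are
  // assumed from that message's own generated class, and the dataset ID value is a placeholder.
  //
  //   InputDataConfig config =
  //       InputDataConfig.newBuilder()
  //           .setDatasetId("1234567890")
  //           .setFractionSplit(
  //               com.google.cloud.aiplatform.v1.FractionSplit.newBuilder()
  //                   .setTrainingFraction(0.8)
  //                   .setValidationFraction(0.1)
  //                   .setTestFraction(0.1))
  //           .build();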
1261 
1262   @java.lang.Override
1263   protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
1264     Builder builder = new Builder(parent);
1265     return builder;
1266   }
1267   /**
1268    *
1269    *
1270    * <pre>
1271    * Specifies Vertex AI owned input data to be used for training, and
1272    * possibly evaluating, the Model.
1273    * </pre>
1274    *
1275    * Protobuf type {@code google.cloud.aiplatform.v1.InputDataConfig}
1276    */
1277   public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
1278       implements
1279       // @@protoc_insertion_point(builder_implements:google.cloud.aiplatform.v1.InputDataConfig)
1280       com.google.cloud.aiplatform.v1.InputDataConfigOrBuilder {
1281     public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
1282       return com.google.cloud.aiplatform.v1.TrainingPipelineProto
1283           .internal_static_google_cloud_aiplatform_v1_InputDataConfig_descriptor;
1284     }
1285 
1286     @java.lang.Override
1287     protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
1288         internalGetFieldAccessorTable() {
1289       return com.google.cloud.aiplatform.v1.TrainingPipelineProto
1290           .internal_static_google_cloud_aiplatform_v1_InputDataConfig_fieldAccessorTable
1291           .ensureFieldAccessorsInitialized(
1292               com.google.cloud.aiplatform.v1.InputDataConfig.class,
1293               com.google.cloud.aiplatform.v1.InputDataConfig.Builder.class);
1294     }
1295 
1296     // Construct using com.google.cloud.aiplatform.v1.InputDataConfig.newBuilder()
1297     private Builder() {}
1298 
1299     private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
1300       super(parent);
1301     }
1302 
1303     @java.lang.Override
1304     public Builder clear() {
1305       super.clear();
1306       bitField0_ = 0;
1307       if (fractionSplitBuilder_ != null) {
1308         fractionSplitBuilder_.clear();
1309       }
1310       if (filterSplitBuilder_ != null) {
1311         filterSplitBuilder_.clear();
1312       }
1313       if (predefinedSplitBuilder_ != null) {
1314         predefinedSplitBuilder_.clear();
1315       }
1316       if (timestampSplitBuilder_ != null) {
1317         timestampSplitBuilder_.clear();
1318       }
1319       if (stratifiedSplitBuilder_ != null) {
1320         stratifiedSplitBuilder_.clear();
1321       }
1322       if (gcsDestinationBuilder_ != null) {
1323         gcsDestinationBuilder_.clear();
1324       }
1325       if (bigqueryDestinationBuilder_ != null) {
1326         bigqueryDestinationBuilder_.clear();
1327       }
1328       datasetId_ = "";
1329       annotationsFilter_ = "";
1330       annotationSchemaUri_ = "";
1331       savedQueryId_ = "";
1332       persistMlUseAssignment_ = false;
1333       splitCase_ = 0;
1334       split_ = null;
1335       destinationCase_ = 0;
1336       destination_ = null;
1337       return this;
1338     }
1339 
1340     @java.lang.Override
1341     public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
1342       return com.google.cloud.aiplatform.v1.TrainingPipelineProto
1343           .internal_static_google_cloud_aiplatform_v1_InputDataConfig_descriptor;
1344     }
1345 
1346     @java.lang.Override
1347     public com.google.cloud.aiplatform.v1.InputDataConfig getDefaultInstanceForType() {
1348       return com.google.cloud.aiplatform.v1.InputDataConfig.getDefaultInstance();
1349     }
1350 
1351     @java.lang.Override
    public com.google.cloud.aiplatform.v1.InputDataConfig build() {
1353       com.google.cloud.aiplatform.v1.InputDataConfig result = buildPartial();
1354       if (!result.isInitialized()) {
1355         throw newUninitializedMessageException(result);
1356       }
1357       return result;
1358     }
1359 
1360     @java.lang.Override
    public com.google.cloud.aiplatform.v1.InputDataConfig buildPartial() {
1362       com.google.cloud.aiplatform.v1.InputDataConfig result =
1363           new com.google.cloud.aiplatform.v1.InputDataConfig(this);
1364       if (bitField0_ != 0) {
1365         buildPartial0(result);
1366       }
1367       buildPartialOneofs(result);
1368       onBuilt();
1369       return result;
1370     }
1371 
    private void buildPartial0(com.google.cloud.aiplatform.v1.InputDataConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000080) != 0)) {
        result.datasetId_ = datasetId_;
      }
      if (((from_bitField0_ & 0x00000100) != 0)) {
        result.annotationsFilter_ = annotationsFilter_;
      }
      if (((from_bitField0_ & 0x00000200) != 0)) {
        result.annotationSchemaUri_ = annotationSchemaUri_;
      }
      if (((from_bitField0_ & 0x00000400) != 0)) {
        result.savedQueryId_ = savedQueryId_;
      }
      if (((from_bitField0_ & 0x00000800) != 0)) {
        result.persistMlUseAssignment_ = persistMlUseAssignment_;
      }
    }
1390 
    private void buildPartialOneofs(com.google.cloud.aiplatform.v1.InputDataConfig result) {
      result.splitCase_ = splitCase_;
      result.split_ = this.split_;
      if (splitCase_ == 2 && fractionSplitBuilder_ != null) {
        result.split_ = fractionSplitBuilder_.build();
      }
      if (splitCase_ == 3 && filterSplitBuilder_ != null) {
        result.split_ = filterSplitBuilder_.build();
      }
      if (splitCase_ == 4 && predefinedSplitBuilder_ != null) {
        result.split_ = predefinedSplitBuilder_.build();
      }
      if (splitCase_ == 5 && timestampSplitBuilder_ != null) {
        result.split_ = timestampSplitBuilder_.build();
      }
      if (splitCase_ == 12 && stratifiedSplitBuilder_ != null) {
        result.split_ = stratifiedSplitBuilder_.build();
      }
      result.destinationCase_ = destinationCase_;
      result.destination_ = this.destination_;
      if (destinationCase_ == 8 && gcsDestinationBuilder_ != null) {
        result.destination_ = gcsDestinationBuilder_.build();
      }
      if (destinationCase_ == 10 && bigqueryDestinationBuilder_ != null) {
        result.destination_ = bigqueryDestinationBuilder_.build();
      }
    }
1418 
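    // Illustrative note (not generated code): buildPartial0 copies only the scalar/string fields
    // whose bits are set, while buildPartialOneofs copies whichever split and destination case was
    // set last, so at most one split and one destination end up in the built message. A minimal
    // sketch, assuming the standard generated setters on PredefinedSplit and FractionSplit:
    //
    //   InputDataConfig.Builder b =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setPredefinedSplit(
    //               com.google.cloud.aiplatform.v1.PredefinedSplit.newBuilder().setKey("ml_use"))
    //           .setFractionSplit(
    //               com.google.cloud.aiplatform.v1.FractionSplit.newBuilder()
    //                   .setTrainingFraction(0.8)
    //                   .setValidationFraction(0.1)
    //                   .setTestFraction(0.1));
    //   // Only the last split survives: b.build().getSplitCase() == SplitCase.FRACTION_SPLIT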
1419     @java.lang.Override
    public Builder clone() {
1421       return super.clone();
1422     }
1423 
1424     @java.lang.Override
    public Builder setField(
1426         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1427       return super.setField(field, value);
1428     }
1429 
1430     @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
1432       return super.clearField(field);
1433     }
1434 
1435     @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
1437       return super.clearOneof(oneof);
1438     }
1439 
1440     @java.lang.Override
    public Builder setRepeatedField(
1442         com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
1443       return super.setRepeatedField(field, index, value);
1444     }
1445 
1446     @java.lang.Override
    public Builder addRepeatedField(
1448         com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
1449       return super.addRepeatedField(field, value);
1450     }
1451 
1452     @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
1454       if (other instanceof com.google.cloud.aiplatform.v1.InputDataConfig) {
1455         return mergeFrom((com.google.cloud.aiplatform.v1.InputDataConfig) other);
1456       } else {
1457         super.mergeFrom(other);
1458         return this;
1459       }
1460     }
1461 
    public Builder mergeFrom(com.google.cloud.aiplatform.v1.InputDataConfig other) {
1463       if (other == com.google.cloud.aiplatform.v1.InputDataConfig.getDefaultInstance()) return this;
1464       if (!other.getDatasetId().isEmpty()) {
1465         datasetId_ = other.datasetId_;
1466         bitField0_ |= 0x00000080;
1467         onChanged();
1468       }
1469       if (!other.getAnnotationsFilter().isEmpty()) {
1470         annotationsFilter_ = other.annotationsFilter_;
1471         bitField0_ |= 0x00000100;
1472         onChanged();
1473       }
1474       if (!other.getAnnotationSchemaUri().isEmpty()) {
1475         annotationSchemaUri_ = other.annotationSchemaUri_;
1476         bitField0_ |= 0x00000200;
1477         onChanged();
1478       }
1479       if (!other.getSavedQueryId().isEmpty()) {
1480         savedQueryId_ = other.savedQueryId_;
1481         bitField0_ |= 0x00000400;
1482         onChanged();
1483       }
1484       if (other.getPersistMlUseAssignment() != false) {
1485         setPersistMlUseAssignment(other.getPersistMlUseAssignment());
1486       }
1487       switch (other.getSplitCase()) {
1488         case FRACTION_SPLIT:
1489           {
1490             mergeFractionSplit(other.getFractionSplit());
1491             break;
1492           }
1493         case FILTER_SPLIT:
1494           {
1495             mergeFilterSplit(other.getFilterSplit());
1496             break;
1497           }
1498         case PREDEFINED_SPLIT:
1499           {
1500             mergePredefinedSplit(other.getPredefinedSplit());
1501             break;
1502           }
1503         case TIMESTAMP_SPLIT:
1504           {
1505             mergeTimestampSplit(other.getTimestampSplit());
1506             break;
1507           }
1508         case STRATIFIED_SPLIT:
1509           {
1510             mergeStratifiedSplit(other.getStratifiedSplit());
1511             break;
1512           }
1513         case SPLIT_NOT_SET:
1514           {
1515             break;
1516           }
1517       }
1518       switch (other.getDestinationCase()) {
1519         case GCS_DESTINATION:
1520           {
1521             mergeGcsDestination(other.getGcsDestination());
1522             break;
1523           }
1524         case BIGQUERY_DESTINATION:
1525           {
1526             mergeBigqueryDestination(other.getBigqueryDestination());
1527             break;
1528           }
1529         case DESTINATION_NOT_SET:
1530           {
1531             break;
1532           }
1533       }
1534       this.mergeUnknownFields(other.getUnknownFields());
1535       onChanged();
1536       return this;
1537     }
1538 
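    // Illustrative note (not generated code): mergeFrom(InputDataConfig) only overwrites fields
    // that are non-empty in `other`, and for the two oneofs the case set in `other` wins. A
    // minimal sketch, assuming PredefinedSplit's generated setKey setter:
    //
    //   InputDataConfig base =
    //       InputDataConfig.newBuilder().setDatasetId("1234567890").build();
    //   InputDataConfig overlay =
    //       InputDataConfig.newBuilder()
    //           .setPredefinedSplit(
    //               com.google.cloud.aiplatform.v1.PredefinedSplit.newBuilder().setKey("ml_use"))
    //           .build();
    //   InputDataConfig merged = base.toBuilder().mergeFrom(overlay).build();
    //   // merged keeps datasetId "1234567890" and now reports SplitCase.PREDEFINED_SPLIT.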
    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
1552       try {
1553         boolean done = false;
1554         while (!done) {
1555           int tag = input.readTag();
1556           switch (tag) {
1557             case 0:
1558               done = true;
1559               break;
1560             case 10:
1561               {
1562                 datasetId_ = input.readStringRequireUtf8();
1563                 bitField0_ |= 0x00000080;
1564                 break;
1565               } // case 10
1566             case 18:
1567               {
1568                 input.readMessage(getFractionSplitFieldBuilder().getBuilder(), extensionRegistry);
1569                 splitCase_ = 2;
1570                 break;
1571               } // case 18
1572             case 26:
1573               {
1574                 input.readMessage(getFilterSplitFieldBuilder().getBuilder(), extensionRegistry);
1575                 splitCase_ = 3;
1576                 break;
1577               } // case 26
1578             case 34:
1579               {
1580                 input.readMessage(getPredefinedSplitFieldBuilder().getBuilder(), extensionRegistry);
1581                 splitCase_ = 4;
1582                 break;
1583               } // case 34
1584             case 42:
1585               {
1586                 input.readMessage(getTimestampSplitFieldBuilder().getBuilder(), extensionRegistry);
1587                 splitCase_ = 5;
1588                 break;
1589               } // case 42
1590             case 50:
1591               {
1592                 annotationsFilter_ = input.readStringRequireUtf8();
1593                 bitField0_ |= 0x00000100;
1594                 break;
1595               } // case 50
1596             case 58:
1597               {
1598                 savedQueryId_ = input.readStringRequireUtf8();
1599                 bitField0_ |= 0x00000400;
1600                 break;
1601               } // case 58
1602             case 66:
1603               {
1604                 input.readMessage(getGcsDestinationFieldBuilder().getBuilder(), extensionRegistry);
1605                 destinationCase_ = 8;
1606                 break;
1607               } // case 66
1608             case 74:
1609               {
1610                 annotationSchemaUri_ = input.readStringRequireUtf8();
1611                 bitField0_ |= 0x00000200;
1612                 break;
1613               } // case 74
1614             case 82:
1615               {
1616                 input.readMessage(
1617                     getBigqueryDestinationFieldBuilder().getBuilder(), extensionRegistry);
1618                 destinationCase_ = 10;
1619                 break;
1620               } // case 82
1621             case 88:
1622               {
1623                 persistMlUseAssignment_ = input.readBool();
1624                 bitField0_ |= 0x00000800;
1625                 break;
1626               } // case 88
1627             case 98:
1628               {
1629                 input.readMessage(getStratifiedSplitFieldBuilder().getBuilder(), extensionRegistry);
1630                 splitCase_ = 12;
1631                 break;
1632               } // case 98
1633             default:
1634               {
1635                 if (!super.parseUnknownField(input, extensionRegistry, tag)) {
1636                   done = true; // was an endgroup tag
1637                 }
1638                 break;
1639               } // default:
1640           } // switch (tag)
1641         } // while (!done)
1642       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1643         throw e.unwrapIOException();
1644       } finally {
1645         onChanged();
1646       } // finally
1647       return this;
1648     }
1649 
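    // Illustrative note (not generated code): this low-level mergeFrom reads one tag at a time and
    // is what the standard protobuf entry points delegate to; callers normally use parseFrom. A
    // minimal round-trip sketch using the inherited protobuf API:
    //
    //   byte[] wire = config.toByteArray();
    //   InputDataConfig parsed = InputDataConfig.parseFrom(wire);
    //   // parseFrom throws InvalidProtocolBufferException on malformed input.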
    private int splitCase_ = 0;
    private java.lang.Object split_;

    public SplitCase getSplitCase() {
      return SplitCase.forNumber(splitCase_);
    }

    public Builder clearSplit() {
      splitCase_ = 0;
      split_ = null;
      onChanged();
      return this;
    }

    private int destinationCase_ = 0;
    private java.lang.Object destination_;

    public DestinationCase getDestinationCase() {
      return DestinationCase.forNumber(destinationCase_);
    }

    public Builder clearDestination() {
      destinationCase_ = 0;
      destination_ = null;
      onChanged();
      return this;
    }

    private int bitField0_;
1679 
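    // Illustrative note (not generated code): splitCase_ and destinationCase_ track which member
    // of each oneof is populated, and readers can branch on the generated case enums. A minimal
    // sketch, assuming FractionSplit's generated getTrainingFraction getter:
    //
    //   switch (config.getSplitCase()) {
    //     case FRACTION_SPLIT:
    //       double train = config.getFractionSplit().getTrainingFraction();
    //       break;
    //     case SPLIT_NOT_SET:
    //       // No split was specified; Vertex AI falls back to its default split.
    //       break;
    //     default:
    //       break;
    //   }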
1680     private com.google.protobuf.SingleFieldBuilderV3<
1681             com.google.cloud.aiplatform.v1.FractionSplit,
1682             com.google.cloud.aiplatform.v1.FractionSplit.Builder,
1683             com.google.cloud.aiplatform.v1.FractionSplitOrBuilder>
1684         fractionSplitBuilder_;
1685     /**
1686      *
1687      *
1688      * <pre>
1689      * Split based on fractions defining the size of each set.
1690      * </pre>
1691      *
1692      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1693      *
1694      * @return Whether the fractionSplit field is set.
1695      */
1696     @java.lang.Override
    public boolean hasFractionSplit() {
1698       return splitCase_ == 2;
1699     }
1700     /**
1701      *
1702      *
1703      * <pre>
1704      * Split based on fractions defining the size of each set.
1705      * </pre>
1706      *
1707      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1708      *
1709      * @return The fractionSplit.
1710      */
1711     @java.lang.Override
    public com.google.cloud.aiplatform.v1.FractionSplit getFractionSplit() {
1713       if (fractionSplitBuilder_ == null) {
1714         if (splitCase_ == 2) {
1715           return (com.google.cloud.aiplatform.v1.FractionSplit) split_;
1716         }
1717         return com.google.cloud.aiplatform.v1.FractionSplit.getDefaultInstance();
1718       } else {
1719         if (splitCase_ == 2) {
1720           return fractionSplitBuilder_.getMessage();
1721         }
1722         return com.google.cloud.aiplatform.v1.FractionSplit.getDefaultInstance();
1723       }
1724     }
1725     /**
1726      *
1727      *
1728      * <pre>
1729      * Split based on fractions defining the size of each set.
1730      * </pre>
1731      *
1732      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1733      */
    public Builder setFractionSplit(com.google.cloud.aiplatform.v1.FractionSplit value) {
1735       if (fractionSplitBuilder_ == null) {
1736         if (value == null) {
1737           throw new NullPointerException();
1738         }
1739         split_ = value;
1740         onChanged();
1741       } else {
1742         fractionSplitBuilder_.setMessage(value);
1743       }
1744       splitCase_ = 2;
1745       return this;
1746     }
1747     /**
1748      *
1749      *
1750      * <pre>
1751      * Split based on fractions defining the size of each set.
1752      * </pre>
1753      *
1754      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1755      */
    public Builder setFractionSplit(
1757         com.google.cloud.aiplatform.v1.FractionSplit.Builder builderForValue) {
1758       if (fractionSplitBuilder_ == null) {
1759         split_ = builderForValue.build();
1760         onChanged();
1761       } else {
1762         fractionSplitBuilder_.setMessage(builderForValue.build());
1763       }
1764       splitCase_ = 2;
1765       return this;
1766     }
1767     /**
1768      *
1769      *
1770      * <pre>
1771      * Split based on fractions defining the size of each set.
1772      * </pre>
1773      *
1774      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1775      */
    public Builder mergeFractionSplit(com.google.cloud.aiplatform.v1.FractionSplit value) {
1777       if (fractionSplitBuilder_ == null) {
1778         if (splitCase_ == 2
1779             && split_ != com.google.cloud.aiplatform.v1.FractionSplit.getDefaultInstance()) {
1780           split_ =
1781               com.google.cloud.aiplatform.v1.FractionSplit.newBuilder(
1782                       (com.google.cloud.aiplatform.v1.FractionSplit) split_)
1783                   .mergeFrom(value)
1784                   .buildPartial();
1785         } else {
1786           split_ = value;
1787         }
1788         onChanged();
1789       } else {
1790         if (splitCase_ == 2) {
1791           fractionSplitBuilder_.mergeFrom(value);
1792         } else {
1793           fractionSplitBuilder_.setMessage(value);
1794         }
1795       }
1796       splitCase_ = 2;
1797       return this;
1798     }
1799     /**
1800      *
1801      *
1802      * <pre>
1803      * Split based on fractions defining the size of each set.
1804      * </pre>
1805      *
1806      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1807      */
    public Builder clearFractionSplit() {
1809       if (fractionSplitBuilder_ == null) {
1810         if (splitCase_ == 2) {
1811           splitCase_ = 0;
1812           split_ = null;
1813           onChanged();
1814         }
1815       } else {
1816         if (splitCase_ == 2) {
1817           splitCase_ = 0;
1818           split_ = null;
1819         }
1820         fractionSplitBuilder_.clear();
1821       }
1822       return this;
1823     }
1824     /**
1825      *
1826      *
1827      * <pre>
1828      * Split based on fractions defining the size of each set.
1829      * </pre>
1830      *
1831      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1832      */
    public com.google.cloud.aiplatform.v1.FractionSplit.Builder getFractionSplitBuilder() {
1834       return getFractionSplitFieldBuilder().getBuilder();
1835     }
1836     /**
1837      *
1838      *
1839      * <pre>
1840      * Split based on fractions defining the size of each set.
1841      * </pre>
1842      *
1843      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1844      */
1845     @java.lang.Override
    public com.google.cloud.aiplatform.v1.FractionSplitOrBuilder getFractionSplitOrBuilder() {
1847       if ((splitCase_ == 2) && (fractionSplitBuilder_ != null)) {
1848         return fractionSplitBuilder_.getMessageOrBuilder();
1849       } else {
1850         if (splitCase_ == 2) {
1851           return (com.google.cloud.aiplatform.v1.FractionSplit) split_;
1852         }
1853         return com.google.cloud.aiplatform.v1.FractionSplit.getDefaultInstance();
1854       }
1855     }
1856     /**
1857      *
1858      *
1859      * <pre>
1860      * Split based on fractions defining the size of each set.
1861      * </pre>
1862      *
1863      * <code>.google.cloud.aiplatform.v1.FractionSplit fraction_split = 2;</code>
1864      */
1865     private com.google.protobuf.SingleFieldBuilderV3<
1866             com.google.cloud.aiplatform.v1.FractionSplit,
1867             com.google.cloud.aiplatform.v1.FractionSplit.Builder,
1868             com.google.cloud.aiplatform.v1.FractionSplitOrBuilder>
        getFractionSplitFieldBuilder() {
1870       if (fractionSplitBuilder_ == null) {
1871         if (!(splitCase_ == 2)) {
1872           split_ = com.google.cloud.aiplatform.v1.FractionSplit.getDefaultInstance();
1873         }
1874         fractionSplitBuilder_ =
1875             new com.google.protobuf.SingleFieldBuilderV3<
1876                 com.google.cloud.aiplatform.v1.FractionSplit,
1877                 com.google.cloud.aiplatform.v1.FractionSplit.Builder,
1878                 com.google.cloud.aiplatform.v1.FractionSplitOrBuilder>(
1879                 (com.google.cloud.aiplatform.v1.FractionSplit) split_,
1880                 getParentForChildren(),
1881                 isClean());
1882         split_ = null;
1883       }
1884       splitCase_ = 2;
1885       onChanged();
1886       return fractionSplitBuilder_;
1887     }
1888 
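    // Illustrative note (not generated code): fraction_split divides the Dataset randomly by the
    // given ratios. A minimal sketch, assuming FractionSplit's generated fraction setters:
    //
    //   InputDataConfig withFractions =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setFractionSplit(
    //               com.google.cloud.aiplatform.v1.FractionSplit.newBuilder()
    //                   .setTrainingFraction(0.8)
    //                   .setValidationFraction(0.1)
    //                   .setTestFraction(0.1))
    //           .build();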
1889     private com.google.protobuf.SingleFieldBuilderV3<
1890             com.google.cloud.aiplatform.v1.FilterSplit,
1891             com.google.cloud.aiplatform.v1.FilterSplit.Builder,
1892             com.google.cloud.aiplatform.v1.FilterSplitOrBuilder>
1893         filterSplitBuilder_;
1894     /**
1895      *
1896      *
1897      * <pre>
1898      * Split based on the provided filters for each set.
1899      * </pre>
1900      *
1901      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
1902      *
1903      * @return Whether the filterSplit field is set.
1904      */
1905     @java.lang.Override
    public boolean hasFilterSplit() {
1907       return splitCase_ == 3;
1908     }
1909     /**
1910      *
1911      *
1912      * <pre>
1913      * Split based on the provided filters for each set.
1914      * </pre>
1915      *
1916      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
1917      *
1918      * @return The filterSplit.
1919      */
1920     @java.lang.Override
    public com.google.cloud.aiplatform.v1.FilterSplit getFilterSplit() {
1922       if (filterSplitBuilder_ == null) {
1923         if (splitCase_ == 3) {
1924           return (com.google.cloud.aiplatform.v1.FilterSplit) split_;
1925         }
1926         return com.google.cloud.aiplatform.v1.FilterSplit.getDefaultInstance();
1927       } else {
1928         if (splitCase_ == 3) {
1929           return filterSplitBuilder_.getMessage();
1930         }
1931         return com.google.cloud.aiplatform.v1.FilterSplit.getDefaultInstance();
1932       }
1933     }
1934     /**
1935      *
1936      *
1937      * <pre>
1938      * Split based on the provided filters for each set.
1939      * </pre>
1940      *
1941      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
1942      */
    public Builder setFilterSplit(com.google.cloud.aiplatform.v1.FilterSplit value) {
1944       if (filterSplitBuilder_ == null) {
1945         if (value == null) {
1946           throw new NullPointerException();
1947         }
1948         split_ = value;
1949         onChanged();
1950       } else {
1951         filterSplitBuilder_.setMessage(value);
1952       }
1953       splitCase_ = 3;
1954       return this;
1955     }
1956     /**
1957      *
1958      *
1959      * <pre>
1960      * Split based on the provided filters for each set.
1961      * </pre>
1962      *
1963      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
1964      */
    public Builder setFilterSplit(
1966         com.google.cloud.aiplatform.v1.FilterSplit.Builder builderForValue) {
1967       if (filterSplitBuilder_ == null) {
1968         split_ = builderForValue.build();
1969         onChanged();
1970       } else {
1971         filterSplitBuilder_.setMessage(builderForValue.build());
1972       }
1973       splitCase_ = 3;
1974       return this;
1975     }
1976     /**
1977      *
1978      *
1979      * <pre>
1980      * Split based on the provided filters for each set.
1981      * </pre>
1982      *
1983      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
1984      */
    public Builder mergeFilterSplit(com.google.cloud.aiplatform.v1.FilterSplit value) {
1986       if (filterSplitBuilder_ == null) {
1987         if (splitCase_ == 3
1988             && split_ != com.google.cloud.aiplatform.v1.FilterSplit.getDefaultInstance()) {
1989           split_ =
1990               com.google.cloud.aiplatform.v1.FilterSplit.newBuilder(
1991                       (com.google.cloud.aiplatform.v1.FilterSplit) split_)
1992                   .mergeFrom(value)
1993                   .buildPartial();
1994         } else {
1995           split_ = value;
1996         }
1997         onChanged();
1998       } else {
1999         if (splitCase_ == 3) {
2000           filterSplitBuilder_.mergeFrom(value);
2001         } else {
2002           filterSplitBuilder_.setMessage(value);
2003         }
2004       }
2005       splitCase_ = 3;
2006       return this;
2007     }
2008     /**
2009      *
2010      *
2011      * <pre>
2012      * Split based on the provided filters for each set.
2013      * </pre>
2014      *
2015      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
2016      */
    public Builder clearFilterSplit() {
2018       if (filterSplitBuilder_ == null) {
2019         if (splitCase_ == 3) {
2020           splitCase_ = 0;
2021           split_ = null;
2022           onChanged();
2023         }
2024       } else {
2025         if (splitCase_ == 3) {
2026           splitCase_ = 0;
2027           split_ = null;
2028         }
2029         filterSplitBuilder_.clear();
2030       }
2031       return this;
2032     }
2033     /**
2034      *
2035      *
2036      * <pre>
2037      * Split based on the provided filters for each set.
2038      * </pre>
2039      *
2040      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
2041      */
    public com.google.cloud.aiplatform.v1.FilterSplit.Builder getFilterSplitBuilder() {
2043       return getFilterSplitFieldBuilder().getBuilder();
2044     }
2045     /**
2046      *
2047      *
2048      * <pre>
2049      * Split based on the provided filters for each set.
2050      * </pre>
2051      *
2052      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
2053      */
2054     @java.lang.Override
    public com.google.cloud.aiplatform.v1.FilterSplitOrBuilder getFilterSplitOrBuilder() {
2056       if ((splitCase_ == 3) && (filterSplitBuilder_ != null)) {
2057         return filterSplitBuilder_.getMessageOrBuilder();
2058       } else {
2059         if (splitCase_ == 3) {
2060           return (com.google.cloud.aiplatform.v1.FilterSplit) split_;
2061         }
2062         return com.google.cloud.aiplatform.v1.FilterSplit.getDefaultInstance();
2063       }
2064     }
2065     /**
2066      *
2067      *
2068      * <pre>
2069      * Split based on the provided filters for each set.
2070      * </pre>
2071      *
2072      * <code>.google.cloud.aiplatform.v1.FilterSplit filter_split = 3;</code>
2073      */
2074     private com.google.protobuf.SingleFieldBuilderV3<
2075             com.google.cloud.aiplatform.v1.FilterSplit,
2076             com.google.cloud.aiplatform.v1.FilterSplit.Builder,
2077             com.google.cloud.aiplatform.v1.FilterSplitOrBuilder>
        getFilterSplitFieldBuilder() {
2079       if (filterSplitBuilder_ == null) {
2080         if (!(splitCase_ == 3)) {
2081           split_ = com.google.cloud.aiplatform.v1.FilterSplit.getDefaultInstance();
2082         }
2083         filterSplitBuilder_ =
2084             new com.google.protobuf.SingleFieldBuilderV3<
2085                 com.google.cloud.aiplatform.v1.FilterSplit,
2086                 com.google.cloud.aiplatform.v1.FilterSplit.Builder,
2087                 com.google.cloud.aiplatform.v1.FilterSplitOrBuilder>(
2088                 (com.google.cloud.aiplatform.v1.FilterSplit) split_,
2089                 getParentForChildren(),
2090                 isClean());
2091         split_ = null;
2092       }
2093       splitCase_ = 3;
2094       onChanged();
2095       return filterSplitBuilder_;
2096     }
2097 
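    // Illustrative note (not generated code): filter_split assigns DataItems to the three sets by
    // the provided filters. A minimal sketch, assuming FilterSplit's generated filter setters; the
    // filter strings below are placeholders, not a documented filter syntax:
    //
    //   com.google.cloud.aiplatform.v1.FilterSplit filters =
    //       com.google.cloud.aiplatform.v1.FilterSplit.newBuilder()
    //           .setTrainingFilter("labels.ml_use=training")
    //           .setValidationFilter("labels.ml_use=validation")
    //           .setTestFilter("labels.ml_use=test")
    //           .build();
    //   InputDataConfig withFilters =
    //       InputDataConfig.newBuilder().setDatasetId("1234567890").setFilterSplit(filters).build();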
2098     private com.google.protobuf.SingleFieldBuilderV3<
2099             com.google.cloud.aiplatform.v1.PredefinedSplit,
2100             com.google.cloud.aiplatform.v1.PredefinedSplit.Builder,
2101             com.google.cloud.aiplatform.v1.PredefinedSplitOrBuilder>
2102         predefinedSplitBuilder_;
2103     /**
2104      *
2105      *
2106      * <pre>
2107      * Supported only for tabular Datasets.
2108      * Split based on a predefined key.
2109      * </pre>
2110      *
2111      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2112      *
2113      * @return Whether the predefinedSplit field is set.
2114      */
2115     @java.lang.Override
    public boolean hasPredefinedSplit() {
2117       return splitCase_ == 4;
2118     }
2119     /**
2120      *
2121      *
2122      * <pre>
2123      * Supported only for tabular Datasets.
2124      * Split based on a predefined key.
2125      * </pre>
2126      *
2127      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2128      *
2129      * @return The predefinedSplit.
2130      */
2131     @java.lang.Override
    public com.google.cloud.aiplatform.v1.PredefinedSplit getPredefinedSplit() {
2133       if (predefinedSplitBuilder_ == null) {
2134         if (splitCase_ == 4) {
2135           return (com.google.cloud.aiplatform.v1.PredefinedSplit) split_;
2136         }
2137         return com.google.cloud.aiplatform.v1.PredefinedSplit.getDefaultInstance();
2138       } else {
2139         if (splitCase_ == 4) {
2140           return predefinedSplitBuilder_.getMessage();
2141         }
2142         return com.google.cloud.aiplatform.v1.PredefinedSplit.getDefaultInstance();
2143       }
2144     }
2145     /**
2146      *
2147      *
2148      * <pre>
2149      * Supported only for tabular Datasets.
2150      * Split based on a predefined key.
2151      * </pre>
2152      *
2153      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2154      */
    public Builder setPredefinedSplit(com.google.cloud.aiplatform.v1.PredefinedSplit value) {
2156       if (predefinedSplitBuilder_ == null) {
2157         if (value == null) {
2158           throw new NullPointerException();
2159         }
2160         split_ = value;
2161         onChanged();
2162       } else {
2163         predefinedSplitBuilder_.setMessage(value);
2164       }
2165       splitCase_ = 4;
2166       return this;
2167     }
2168     /**
2169      *
2170      *
2171      * <pre>
2172      * Supported only for tabular Datasets.
2173      * Split based on a predefined key.
2174      * </pre>
2175      *
2176      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2177      */
    public Builder setPredefinedSplit(
2179         com.google.cloud.aiplatform.v1.PredefinedSplit.Builder builderForValue) {
2180       if (predefinedSplitBuilder_ == null) {
2181         split_ = builderForValue.build();
2182         onChanged();
2183       } else {
2184         predefinedSplitBuilder_.setMessage(builderForValue.build());
2185       }
2186       splitCase_ = 4;
2187       return this;
2188     }
2189     /**
2190      *
2191      *
2192      * <pre>
2193      * Supported only for tabular Datasets.
2194      * Split based on a predefined key.
2195      * </pre>
2196      *
2197      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2198      */
    public Builder mergePredefinedSplit(com.google.cloud.aiplatform.v1.PredefinedSplit value) {
2200       if (predefinedSplitBuilder_ == null) {
2201         if (splitCase_ == 4
2202             && split_ != com.google.cloud.aiplatform.v1.PredefinedSplit.getDefaultInstance()) {
2203           split_ =
2204               com.google.cloud.aiplatform.v1.PredefinedSplit.newBuilder(
2205                       (com.google.cloud.aiplatform.v1.PredefinedSplit) split_)
2206                   .mergeFrom(value)
2207                   .buildPartial();
2208         } else {
2209           split_ = value;
2210         }
2211         onChanged();
2212       } else {
2213         if (splitCase_ == 4) {
2214           predefinedSplitBuilder_.mergeFrom(value);
2215         } else {
2216           predefinedSplitBuilder_.setMessage(value);
2217         }
2218       }
2219       splitCase_ = 4;
2220       return this;
2221     }
2222     /**
2223      *
2224      *
2225      * <pre>
2226      * Supported only for tabular Datasets.
2227      * Split based on a predefined key.
2228      * </pre>
2229      *
2230      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2231      */
    public Builder clearPredefinedSplit() {
2233       if (predefinedSplitBuilder_ == null) {
2234         if (splitCase_ == 4) {
2235           splitCase_ = 0;
2236           split_ = null;
2237           onChanged();
2238         }
2239       } else {
2240         if (splitCase_ == 4) {
2241           splitCase_ = 0;
2242           split_ = null;
2243         }
2244         predefinedSplitBuilder_.clear();
2245       }
2246       return this;
2247     }
2248     /**
2249      *
2250      *
2251      * <pre>
2252      * Supported only for tabular Datasets.
2253      * Split based on a predefined key.
2254      * </pre>
2255      *
2256      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2257      */
    public com.google.cloud.aiplatform.v1.PredefinedSplit.Builder getPredefinedSplitBuilder() {
2259       return getPredefinedSplitFieldBuilder().getBuilder();
2260     }
2261     /**
2262      *
2263      *
2264      * <pre>
2265      * Supported only for tabular Datasets.
2266      * Split based on a predefined key.
2267      * </pre>
2268      *
2269      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2270      */
2271     @java.lang.Override
    public com.google.cloud.aiplatform.v1.PredefinedSplitOrBuilder getPredefinedSplitOrBuilder() {
2273       if ((splitCase_ == 4) && (predefinedSplitBuilder_ != null)) {
2274         return predefinedSplitBuilder_.getMessageOrBuilder();
2275       } else {
2276         if (splitCase_ == 4) {
2277           return (com.google.cloud.aiplatform.v1.PredefinedSplit) split_;
2278         }
2279         return com.google.cloud.aiplatform.v1.PredefinedSplit.getDefaultInstance();
2280       }
2281     }
2282     /**
2283      *
2284      *
2285      * <pre>
2286      * Supported only for tabular Datasets.
2287      * Split based on a predefined key.
2288      * </pre>
2289      *
2290      * <code>.google.cloud.aiplatform.v1.PredefinedSplit predefined_split = 4;</code>
2291      */
2292     private com.google.protobuf.SingleFieldBuilderV3<
2293             com.google.cloud.aiplatform.v1.PredefinedSplit,
2294             com.google.cloud.aiplatform.v1.PredefinedSplit.Builder,
2295             com.google.cloud.aiplatform.v1.PredefinedSplitOrBuilder>
        getPredefinedSplitFieldBuilder() {
2297       if (predefinedSplitBuilder_ == null) {
2298         if (!(splitCase_ == 4)) {
2299           split_ = com.google.cloud.aiplatform.v1.PredefinedSplit.getDefaultInstance();
2300         }
2301         predefinedSplitBuilder_ =
2302             new com.google.protobuf.SingleFieldBuilderV3<
2303                 com.google.cloud.aiplatform.v1.PredefinedSplit,
2304                 com.google.cloud.aiplatform.v1.PredefinedSplit.Builder,
2305                 com.google.cloud.aiplatform.v1.PredefinedSplitOrBuilder>(
2306                 (com.google.cloud.aiplatform.v1.PredefinedSplit) split_,
2307                 getParentForChildren(),
2308                 isClean());
2309         split_ = null;
2310       }
2311       splitCase_ = 4;
2312       onChanged();
2313       return predefinedSplitBuilder_;
2314     }
2315 
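    // Illustrative note (not generated code): predefined_split (tabular only) reads each row's set
    // assignment from a column of the data. A minimal sketch, assuming PredefinedSplit's generated
    // setKey setter and a hypothetical "ml_use" column:
    //
    //   InputDataConfig withPredefined =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setPredefinedSplit(
    //               com.google.cloud.aiplatform.v1.PredefinedSplit.newBuilder().setKey("ml_use"))
    //           .build();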
2316     private com.google.protobuf.SingleFieldBuilderV3<
2317             com.google.cloud.aiplatform.v1.TimestampSplit,
2318             com.google.cloud.aiplatform.v1.TimestampSplit.Builder,
2319             com.google.cloud.aiplatform.v1.TimestampSplitOrBuilder>
2320         timestampSplitBuilder_;
2321     /**
2322      *
2323      *
2324      * <pre>
2325      * Supported only for tabular Datasets.
2326      * Split based on the timestamp of the input data pieces.
2327      * </pre>
2328      *
2329      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2330      *
2331      * @return Whether the timestampSplit field is set.
2332      */
2333     @java.lang.Override
    public boolean hasTimestampSplit() {
2335       return splitCase_ == 5;
2336     }
2337     /**
2338      *
2339      *
2340      * <pre>
2341      * Supported only for tabular Datasets.
2342      * Split based on the timestamp of the input data pieces.
2343      * </pre>
2344      *
2345      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2346      *
2347      * @return The timestampSplit.
2348      */
2349     @java.lang.Override
    public com.google.cloud.aiplatform.v1.TimestampSplit getTimestampSplit() {
2351       if (timestampSplitBuilder_ == null) {
2352         if (splitCase_ == 5) {
2353           return (com.google.cloud.aiplatform.v1.TimestampSplit) split_;
2354         }
2355         return com.google.cloud.aiplatform.v1.TimestampSplit.getDefaultInstance();
2356       } else {
2357         if (splitCase_ == 5) {
2358           return timestampSplitBuilder_.getMessage();
2359         }
2360         return com.google.cloud.aiplatform.v1.TimestampSplit.getDefaultInstance();
2361       }
2362     }
2363     /**
2364      *
2365      *
2366      * <pre>
2367      * Supported only for tabular Datasets.
2368      * Split based on the timestamp of the input data pieces.
2369      * </pre>
2370      *
2371      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2372      */
    public Builder setTimestampSplit(com.google.cloud.aiplatform.v1.TimestampSplit value) {
2374       if (timestampSplitBuilder_ == null) {
2375         if (value == null) {
2376           throw new NullPointerException();
2377         }
2378         split_ = value;
2379         onChanged();
2380       } else {
2381         timestampSplitBuilder_.setMessage(value);
2382       }
2383       splitCase_ = 5;
2384       return this;
2385     }
2386     /**
2387      *
2388      *
2389      * <pre>
2390      * Supported only for tabular Datasets.
2391      * Split based on the timestamp of the input data pieces.
2392      * </pre>
2393      *
2394      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2395      */
    public Builder setTimestampSplit(
2397         com.google.cloud.aiplatform.v1.TimestampSplit.Builder builderForValue) {
2398       if (timestampSplitBuilder_ == null) {
2399         split_ = builderForValue.build();
2400         onChanged();
2401       } else {
2402         timestampSplitBuilder_.setMessage(builderForValue.build());
2403       }
2404       splitCase_ = 5;
2405       return this;
2406     }
2407     /**
2408      *
2409      *
2410      * <pre>
2411      * Supported only for tabular Datasets.
2412      * Split based on the timestamp of the input data pieces.
2413      * </pre>
2414      *
2415      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2416      */
    public Builder mergeTimestampSplit(com.google.cloud.aiplatform.v1.TimestampSplit value) {
2418       if (timestampSplitBuilder_ == null) {
2419         if (splitCase_ == 5
2420             && split_ != com.google.cloud.aiplatform.v1.TimestampSplit.getDefaultInstance()) {
2421           split_ =
2422               com.google.cloud.aiplatform.v1.TimestampSplit.newBuilder(
2423                       (com.google.cloud.aiplatform.v1.TimestampSplit) split_)
2424                   .mergeFrom(value)
2425                   .buildPartial();
2426         } else {
2427           split_ = value;
2428         }
2429         onChanged();
2430       } else {
2431         if (splitCase_ == 5) {
2432           timestampSplitBuilder_.mergeFrom(value);
2433         } else {
2434           timestampSplitBuilder_.setMessage(value);
2435         }
2436       }
2437       splitCase_ = 5;
2438       return this;
2439     }
2440     /**
2441      *
2442      *
2443      * <pre>
2444      * Supported only for tabular Datasets.
2445      * Split based on the timestamp of the input data pieces.
2446      * </pre>
2447      *
2448      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2449      */
    public Builder clearTimestampSplit() {
2451       if (timestampSplitBuilder_ == null) {
2452         if (splitCase_ == 5) {
2453           splitCase_ = 0;
2454           split_ = null;
2455           onChanged();
2456         }
2457       } else {
2458         if (splitCase_ == 5) {
2459           splitCase_ = 0;
2460           split_ = null;
2461         }
2462         timestampSplitBuilder_.clear();
2463       }
2464       return this;
2465     }
2466     /**
2467      *
2468      *
2469      * <pre>
2470      * Supported only for tabular Datasets.
2471      * Split based on the timestamp of the input data pieces.
2472      * </pre>
2473      *
2474      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2475      */
    public com.google.cloud.aiplatform.v1.TimestampSplit.Builder getTimestampSplitBuilder() {
2477       return getTimestampSplitFieldBuilder().getBuilder();
2478     }
2479     /**
2480      *
2481      *
2482      * <pre>
2483      * Supported only for tabular Datasets.
2484      * Split based on the timestamp of the input data pieces.
2485      * </pre>
2486      *
2487      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2488      */
2489     @java.lang.Override
    public com.google.cloud.aiplatform.v1.TimestampSplitOrBuilder getTimestampSplitOrBuilder() {
2491       if ((splitCase_ == 5) && (timestampSplitBuilder_ != null)) {
2492         return timestampSplitBuilder_.getMessageOrBuilder();
2493       } else {
2494         if (splitCase_ == 5) {
2495           return (com.google.cloud.aiplatform.v1.TimestampSplit) split_;
2496         }
2497         return com.google.cloud.aiplatform.v1.TimestampSplit.getDefaultInstance();
2498       }
2499     }
2500     /**
2501      *
2502      *
2503      * <pre>
2504      * Supported only for tabular Datasets.
2505      * Split based on the timestamp of the input data pieces.
2506      * </pre>
2507      *
2508      * <code>.google.cloud.aiplatform.v1.TimestampSplit timestamp_split = 5;</code>
2509      */
2510     private com.google.protobuf.SingleFieldBuilderV3<
2511             com.google.cloud.aiplatform.v1.TimestampSplit,
2512             com.google.cloud.aiplatform.v1.TimestampSplit.Builder,
2513             com.google.cloud.aiplatform.v1.TimestampSplitOrBuilder>
        getTimestampSplitFieldBuilder() {
2515       if (timestampSplitBuilder_ == null) {
2516         if (!(splitCase_ == 5)) {
2517           split_ = com.google.cloud.aiplatform.v1.TimestampSplit.getDefaultInstance();
2518         }
2519         timestampSplitBuilder_ =
2520             new com.google.protobuf.SingleFieldBuilderV3<
2521                 com.google.cloud.aiplatform.v1.TimestampSplit,
2522                 com.google.cloud.aiplatform.v1.TimestampSplit.Builder,
2523                 com.google.cloud.aiplatform.v1.TimestampSplitOrBuilder>(
2524                 (com.google.cloud.aiplatform.v1.TimestampSplit) split_,
2525                 getParentForChildren(),
2526                 isClean());
2527         split_ = null;
2528       }
2529       splitCase_ = 5;
2530       onChanged();
2531       return timestampSplitBuilder_;
2532     }
2533 
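    // Illustrative note (not generated code): timestamp_split (tabular only) orders rows by a time
    // column and then applies the fractions. A minimal sketch, assuming TimestampSplit's generated
    // fraction and key setters and a hypothetical "event_time" column:
    //
    //   InputDataConfig withTimestamps =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setTimestampSplit(
    //               com.google.cloud.aiplatform.v1.TimestampSplit.newBuilder()
    //                   .setTrainingFraction(0.8)
    //                   .setValidationFraction(0.1)
    //                   .setTestFraction(0.1)
    //                   .setKey("event_time"))
    //           .build();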
2534     private com.google.protobuf.SingleFieldBuilderV3<
2535             com.google.cloud.aiplatform.v1.StratifiedSplit,
2536             com.google.cloud.aiplatform.v1.StratifiedSplit.Builder,
2537             com.google.cloud.aiplatform.v1.StratifiedSplitOrBuilder>
2538         stratifiedSplitBuilder_;
2539     /**
2540      *
2541      *
2542      * <pre>
2543      * Supported only for tabular Datasets.
2544      * Split based on the distribution of the specified column.
2545      * </pre>
2546      *
2547      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2548      *
2549      * @return Whether the stratifiedSplit field is set.
2550      */
2551     @java.lang.Override
    public boolean hasStratifiedSplit() {
2553       return splitCase_ == 12;
2554     }
2555     /**
2556      *
2557      *
2558      * <pre>
2559      * Supported only for tabular Datasets.
2560      * Split based on the distribution of the specified column.
2561      * </pre>
2562      *
2563      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2564      *
2565      * @return The stratifiedSplit.
2566      */
2567     @java.lang.Override
    public com.google.cloud.aiplatform.v1.StratifiedSplit getStratifiedSplit() {
2569       if (stratifiedSplitBuilder_ == null) {
2570         if (splitCase_ == 12) {
2571           return (com.google.cloud.aiplatform.v1.StratifiedSplit) split_;
2572         }
2573         return com.google.cloud.aiplatform.v1.StratifiedSplit.getDefaultInstance();
2574       } else {
2575         if (splitCase_ == 12) {
2576           return stratifiedSplitBuilder_.getMessage();
2577         }
2578         return com.google.cloud.aiplatform.v1.StratifiedSplit.getDefaultInstance();
2579       }
2580     }
2581     /**
2582      *
2583      *
2584      * <pre>
2585      * Supported only for tabular Datasets.
2586      * Split based on the distribution of the specified column.
2587      * </pre>
2588      *
2589      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2590      */
    public Builder setStratifiedSplit(com.google.cloud.aiplatform.v1.StratifiedSplit value) {
2592       if (stratifiedSplitBuilder_ == null) {
2593         if (value == null) {
2594           throw new NullPointerException();
2595         }
2596         split_ = value;
2597         onChanged();
2598       } else {
2599         stratifiedSplitBuilder_.setMessage(value);
2600       }
2601       splitCase_ = 12;
2602       return this;
2603     }
2604     /**
2605      *
2606      *
2607      * <pre>
2608      * Supported only for tabular Datasets.
2609      * Split based on the distribution of the specified column.
2610      * </pre>
2611      *
2612      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2613      */
    public Builder setStratifiedSplit(
2615         com.google.cloud.aiplatform.v1.StratifiedSplit.Builder builderForValue) {
2616       if (stratifiedSplitBuilder_ == null) {
2617         split_ = builderForValue.build();
2618         onChanged();
2619       } else {
2620         stratifiedSplitBuilder_.setMessage(builderForValue.build());
2621       }
2622       splitCase_ = 12;
2623       return this;
2624     }
2625     /**
2626      *
2627      *
2628      * <pre>
2629      * Supported only for tabular Datasets.
2630      * Split based on the distribution of the specified column.
2631      * </pre>
2632      *
2633      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2634      */
    public Builder mergeStratifiedSplit(com.google.cloud.aiplatform.v1.StratifiedSplit value) {
2636       if (stratifiedSplitBuilder_ == null) {
2637         if (splitCase_ == 12
2638             && split_ != com.google.cloud.aiplatform.v1.StratifiedSplit.getDefaultInstance()) {
2639           split_ =
2640               com.google.cloud.aiplatform.v1.StratifiedSplit.newBuilder(
2641                       (com.google.cloud.aiplatform.v1.StratifiedSplit) split_)
2642                   .mergeFrom(value)
2643                   .buildPartial();
2644         } else {
2645           split_ = value;
2646         }
2647         onChanged();
2648       } else {
2649         if (splitCase_ == 12) {
2650           stratifiedSplitBuilder_.mergeFrom(value);
2651         } else {
2652           stratifiedSplitBuilder_.setMessage(value);
2653         }
2654       }
2655       splitCase_ = 12;
2656       return this;
2657     }
2658     /**
2659      *
2660      *
2661      * <pre>
2662      * Supported only for tabular Datasets.
2663      * Split based on the distribution of the specified column.
2664      * </pre>
2665      *
2666      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2667      */
    public Builder clearStratifiedSplit() {
2669       if (stratifiedSplitBuilder_ == null) {
2670         if (splitCase_ == 12) {
2671           splitCase_ = 0;
2672           split_ = null;
2673           onChanged();
2674         }
2675       } else {
2676         if (splitCase_ == 12) {
2677           splitCase_ = 0;
2678           split_ = null;
2679         }
2680         stratifiedSplitBuilder_.clear();
2681       }
2682       return this;
2683     }
2684     /**
2685      *
2686      *
2687      * <pre>
2688      * Supported only for tabular Datasets.
2689      * Split based on the distribution of the specified column.
2690      * </pre>
2691      *
2692      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2693      */
    public com.google.cloud.aiplatform.v1.StratifiedSplit.Builder getStratifiedSplitBuilder() {
2695       return getStratifiedSplitFieldBuilder().getBuilder();
2696     }
2697     /**
2698      *
2699      *
2700      * <pre>
2701      * Supported only for tabular Datasets.
2702      * Split based on the distribution of the specified column.
2703      * </pre>
2704      *
2705      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2706      */
2707     @java.lang.Override
    public com.google.cloud.aiplatform.v1.StratifiedSplitOrBuilder getStratifiedSplitOrBuilder() {
2709       if ((splitCase_ == 12) && (stratifiedSplitBuilder_ != null)) {
2710         return stratifiedSplitBuilder_.getMessageOrBuilder();
2711       } else {
2712         if (splitCase_ == 12) {
2713           return (com.google.cloud.aiplatform.v1.StratifiedSplit) split_;
2714         }
2715         return com.google.cloud.aiplatform.v1.StratifiedSplit.getDefaultInstance();
2716       }
2717     }
2718     /**
2719      *
2720      *
2721      * <pre>
2722      * Supported only for tabular Datasets.
2723      * Split based on the distribution of the specified column.
2724      * </pre>
2725      *
2726      * <code>.google.cloud.aiplatform.v1.StratifiedSplit stratified_split = 12;</code>
2727      */
2728     private com.google.protobuf.SingleFieldBuilderV3<
2729             com.google.cloud.aiplatform.v1.StratifiedSplit,
2730             com.google.cloud.aiplatform.v1.StratifiedSplit.Builder,
2731             com.google.cloud.aiplatform.v1.StratifiedSplitOrBuilder>
        getStratifiedSplitFieldBuilder() {
2733       if (stratifiedSplitBuilder_ == null) {
2734         if (!(splitCase_ == 12)) {
2735           split_ = com.google.cloud.aiplatform.v1.StratifiedSplit.getDefaultInstance();
2736         }
2737         stratifiedSplitBuilder_ =
2738             new com.google.protobuf.SingleFieldBuilderV3<
2739                 com.google.cloud.aiplatform.v1.StratifiedSplit,
2740                 com.google.cloud.aiplatform.v1.StratifiedSplit.Builder,
2741                 com.google.cloud.aiplatform.v1.StratifiedSplitOrBuilder>(
2742                 (com.google.cloud.aiplatform.v1.StratifiedSplit) split_,
2743                 getParentForChildren(),
2744                 isClean());
2745         split_ = null;
2746       }
2747       splitCase_ = 12;
2748       onChanged();
2749       return stratifiedSplitBuilder_;
2750     }
2751 
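    // Illustrative note (not generated code): stratified_split (tabular only) keeps the value
    // distribution of the chosen column similar across the three sets. A minimal sketch, assuming
    // StratifiedSplit's generated fraction and key setters and a hypothetical "label" column:
    //
    //   InputDataConfig withStrata =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setStratifiedSplit(
    //               com.google.cloud.aiplatform.v1.StratifiedSplit.newBuilder()
    //                   .setTrainingFraction(0.8)
    //                   .setValidationFraction(0.1)
    //                   .setTestFraction(0.1)
    //                   .setKey("label"))
    //           .build();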
2752     private com.google.protobuf.SingleFieldBuilderV3<
2753             com.google.cloud.aiplatform.v1.GcsDestination,
2754             com.google.cloud.aiplatform.v1.GcsDestination.Builder,
2755             com.google.cloud.aiplatform.v1.GcsDestinationOrBuilder>
2756         gcsDestinationBuilder_;
2757     /**
2758      *
2759      *
2760      * <pre>
2761      * The Cloud Storage location where the training data is to be
2762      * written to. In the given directory a new directory is created with
2763      * name:
2764      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
2765      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
2766      * All training input data is written into that directory.
2767      * The Vertex AI environment variables representing Cloud Storage
2768      * data URIs are represented in the Cloud Storage wildcard
2769      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
2770      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
2771      * * AIP_TRAINING_DATA_URI =
2772      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
2773      * * AIP_VALIDATION_DATA_URI =
2774      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
2775      * * AIP_TEST_DATA_URI =
2776      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
2777      * </pre>
2778      *
2779      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
2780      *
2781      * @return Whether the gcsDestination field is set.
2782      */
2783     @java.lang.Override
    public boolean hasGcsDestination() {
2785       return destinationCase_ == 8;
2786     }
2787     /**
2788      *
2789      *
2790      * <pre>
2791      * The Cloud Storage location where the training data is to be
2792      * written to. In the given directory a new directory is created with
2793      * name:
2794      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
2795      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
2796      * All training input data is written into that directory.
2797      * The Vertex AI environment variables representing Cloud Storage
2798      * data URIs are represented in the Cloud Storage wildcard
2799      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
2800      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
2801      * * AIP_TRAINING_DATA_URI =
2802      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
2803      * * AIP_VALIDATION_DATA_URI =
2804      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
2805      * * AIP_TEST_DATA_URI =
2806      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
2807      * </pre>
2808      *
2809      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
2810      *
2811      * @return The gcsDestination.
2812      */
2813     @java.lang.Override
2814     public com.google.cloud.aiplatform.v1.GcsDestination getGcsDestination() {
2815       if (gcsDestinationBuilder_ == null) {
2816         if (destinationCase_ == 8) {
2817           return (com.google.cloud.aiplatform.v1.GcsDestination) destination_;
2818         }
2819         return com.google.cloud.aiplatform.v1.GcsDestination.getDefaultInstance();
2820       } else {
2821         if (destinationCase_ == 8) {
2822           return gcsDestinationBuilder_.getMessage();
2823         }
2824         return com.google.cloud.aiplatform.v1.GcsDestination.getDefaultInstance();
2825       }
2826     }
2827     /**
2828      *
2829      *
2830      * <pre>
2831      * The Cloud Storage location where the training data is to be
2832      * written to. In the given directory a new directory is created with
2833      * name:
2834      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
2835      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
2836      * All training input data is written into that directory.
2837      * The Vertex AI environment variables representing Cloud Storage
2838      * data URIs are represented in the Cloud Storage wildcard
2839      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
2840      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
2841      * * AIP_TRAINING_DATA_URI =
2842      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
2843      * * AIP_VALIDATION_DATA_URI =
2844      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
2845      * * AIP_TEST_DATA_URI =
2846      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
2847      * </pre>
2848      *
2849      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
2850      */
2851     public Builder setGcsDestination(com.google.cloud.aiplatform.v1.GcsDestination value) {
2852       if (gcsDestinationBuilder_ == null) {
2853         if (value == null) {
2854           throw new NullPointerException();
2855         }
2856         destination_ = value;
2857         onChanged();
2858       } else {
2859         gcsDestinationBuilder_.setMessage(value);
2860       }
2861       destinationCase_ = 8;
2862       return this;
2863     }
2864     /**
2865      *
2866      *
2867      * <pre>
2868      * The Cloud Storage location where the training data is to be
2869      * written to. In the given directory a new directory is created with
2870      * name:
2871      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
2872      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
2873      * All training input data is written into that directory.
2874      * The Vertex AI environment variables representing Cloud Storage
2875      * data URIs are represented in the Cloud Storage wildcard
2876      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
2877      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
2878      * * AIP_TRAINING_DATA_URI =
2879      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
2880      * * AIP_VALIDATION_DATA_URI =
2881      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
2882      * * AIP_TEST_DATA_URI =
2883      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
2884      * </pre>
2885      *
2886      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
2887      */
2888     public Builder setGcsDestination(
2889         com.google.cloud.aiplatform.v1.GcsDestination.Builder builderForValue) {
2890       if (gcsDestinationBuilder_ == null) {
2891         destination_ = builderForValue.build();
2892         onChanged();
2893       } else {
2894         gcsDestinationBuilder_.setMessage(builderForValue.build());
2895       }
2896       destinationCase_ = 8;
2897       return this;
2898     }
2899     /**
2900      *
2901      *
2902      * <pre>
2903      * The Cloud Storage location where the training data is to be
2904      * written to. In the given directory a new directory is created with
2905      * name:
2906      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
2907      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
2908      * All training input data is written into that directory.
2909      * The Vertex AI environment variables representing Cloud Storage
2910      * data URIs are represented in the Cloud Storage wildcard
2911      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
2912      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
2913      * * AIP_TRAINING_DATA_URI =
2914      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
2915      * * AIP_VALIDATION_DATA_URI =
2916      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
2917      * * AIP_TEST_DATA_URI =
2918      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
2919      * </pre>
2920      *
2921      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
2922      */
2923     public Builder mergeGcsDestination(com.google.cloud.aiplatform.v1.GcsDestination value) {
2924       if (gcsDestinationBuilder_ == null) {
2925         if (destinationCase_ == 8
2926             && destination_ != com.google.cloud.aiplatform.v1.GcsDestination.getDefaultInstance()) {
2927           destination_ =
2928               com.google.cloud.aiplatform.v1.GcsDestination.newBuilder(
2929                       (com.google.cloud.aiplatform.v1.GcsDestination) destination_)
2930                   .mergeFrom(value)
2931                   .buildPartial();
2932         } else {
2933           destination_ = value;
2934         }
2935         onChanged();
2936       } else {
2937         if (destinationCase_ == 8) {
2938           gcsDestinationBuilder_.mergeFrom(value);
2939         } else {
2940           gcsDestinationBuilder_.setMessage(value);
2941         }
2942       }
2943       destinationCase_ = 8;
2944       return this;
2945     }
2946     /**
2947      *
2948      *
2949      * <pre>
2950      * The Cloud Storage location where the training data is to be
2951      * written to. In the given directory a new directory is created with
2952      * name:
2953      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
2954      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
2955      * All training input data is written into that directory.
2956      * The Vertex AI environment variables representing Cloud Storage
2957      * data URIs are represented in the Cloud Storage wildcard
2958      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
2959      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
2960      * * AIP_TRAINING_DATA_URI =
2961      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
2962      * * AIP_VALIDATION_DATA_URI =
2963      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
2964      * * AIP_TEST_DATA_URI =
2965      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
2966      * </pre>
2967      *
2968      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
2969      */
2970     public Builder clearGcsDestination() {
2971       if (gcsDestinationBuilder_ == null) {
2972         if (destinationCase_ == 8) {
2973           destinationCase_ = 0;
2974           destination_ = null;
2975           onChanged();
2976         }
2977       } else {
2978         if (destinationCase_ == 8) {
2979           destinationCase_ = 0;
2980           destination_ = null;
2981         }
2982         gcsDestinationBuilder_.clear();
2983       }
2984       return this;
2985     }
2986     /**
2987      *
2988      *
2989      * <pre>
2990      * The Cloud Storage location where the training data is to be
2991      * written to. In the given directory a new directory is created with
2992      * name:
2993      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
2994      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
2995      * All training input data is written into that directory.
2996      * The Vertex AI environment variables representing Cloud Storage
2997      * data URIs are represented in the Cloud Storage wildcard
2998      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
2999      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
3000      * * AIP_TRAINING_DATA_URI =
3001      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
3002      * * AIP_VALIDATION_DATA_URI =
3003      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
3004      * * AIP_TEST_DATA_URI =
3005      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
3006      * </pre>
3007      *
3008      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
3009      */
3010     public com.google.cloud.aiplatform.v1.GcsDestination.Builder getGcsDestinationBuilder() {
3011       return getGcsDestinationFieldBuilder().getBuilder();
3012     }
3013     /**
3014      *
3015      *
3016      * <pre>
3017      * The Cloud Storage location where the training data is to be
3018      * written to. In the given directory a new directory is created with
3019      * name:
3020      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
3021      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
3022      * All training input data is written into that directory.
3023      * The Vertex AI environment variables representing Cloud Storage
3024      * data URIs are represented in the Cloud Storage wildcard
3025      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
3026      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
3027      * * AIP_TRAINING_DATA_URI =
3028      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
3029      * * AIP_VALIDATION_DATA_URI =
3030      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
3031      * * AIP_TEST_DATA_URI =
3032      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
3033      * </pre>
3034      *
3035      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
3036      */
3037     @java.lang.Override
3038     public com.google.cloud.aiplatform.v1.GcsDestinationOrBuilder getGcsDestinationOrBuilder() {
3039       if ((destinationCase_ == 8) && (gcsDestinationBuilder_ != null)) {
3040         return gcsDestinationBuilder_.getMessageOrBuilder();
3041       } else {
3042         if (destinationCase_ == 8) {
3043           return (com.google.cloud.aiplatform.v1.GcsDestination) destination_;
3044         }
3045         return com.google.cloud.aiplatform.v1.GcsDestination.getDefaultInstance();
3046       }
3047     }
3048     /**
3049      *
3050      *
3051      * <pre>
3052      * The Cloud Storage location where the training data is to be
3053      * written to. In the given directory a new directory is created with
3054      * name:
3055      * `dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;timestamp-of-training-call&gt;`
3056      * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
3057      * All training input data is written into that directory.
3058      * The Vertex AI environment variables representing Cloud Storage
3059      * data URIs are represented in the Cloud Storage wildcard
3060      * format to support sharded data. e.g.: "gs://.../training-*.jsonl"
3061      * * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
3062      * * AIP_TRAINING_DATA_URI =
3063      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/training-*.${AIP_DATA_FORMAT}"
3064      * * AIP_VALIDATION_DATA_URI =
3065      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/validation-*.${AIP_DATA_FORMAT}"
3066      * * AIP_TEST_DATA_URI =
3067      * "gcs_destination/dataset-&lt;dataset-id&gt;-&lt;annotation-type&gt;-&lt;time&gt;/test-*.${AIP_DATA_FORMAT}"
3068      * </pre>
3069      *
3070      * <code>.google.cloud.aiplatform.v1.GcsDestination gcs_destination = 8;</code>
3071      */
3072     private com.google.protobuf.SingleFieldBuilderV3<
3073             com.google.cloud.aiplatform.v1.GcsDestination,
3074             com.google.cloud.aiplatform.v1.GcsDestination.Builder,
3075             com.google.cloud.aiplatform.v1.GcsDestinationOrBuilder>
3076         getGcsDestinationFieldBuilder() {
3077       if (gcsDestinationBuilder_ == null) {
3078         if (!(destinationCase_ == 8)) {
3079           destination_ = com.google.cloud.aiplatform.v1.GcsDestination.getDefaultInstance();
3080         }
3081         gcsDestinationBuilder_ =
3082             new com.google.protobuf.SingleFieldBuilderV3<
3083                 com.google.cloud.aiplatform.v1.GcsDestination,
3084                 com.google.cloud.aiplatform.v1.GcsDestination.Builder,
3085                 com.google.cloud.aiplatform.v1.GcsDestinationOrBuilder>(
3086                 (com.google.cloud.aiplatform.v1.GcsDestination) destination_,
3087                 getParentForChildren(),
3088                 isClean());
3089         destination_ = null;
3090       }
3091       destinationCase_ = 8;
3092       onChanged();
3093       return gcsDestinationBuilder_;
3094     }
3095 
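    // Illustrative usage sketch (not part of the generated code): configuring the
    // gcs_destination member of the destination oneof. This assumes GcsDestination
    // exposes setOutputUriPrefix(String), which corresponds to its documented
    // output_uri_prefix field; the dataset ID and bucket path below are placeholders.
    //
    //   InputDataConfig config =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setGcsDestination(
    //               GcsDestination.newBuilder()
    //                   .setOutputUriPrefix("gs://my-bucket/training-output/")
    //                   .build())
    //           .build();
    //
    // Because gcs_destination and bigquery_destination share the destination oneof,
    // setting one of them clears the other.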
3096     private com.google.protobuf.SingleFieldBuilderV3<
3097             com.google.cloud.aiplatform.v1.BigQueryDestination,
3098             com.google.cloud.aiplatform.v1.BigQueryDestination.Builder,
3099             com.google.cloud.aiplatform.v1.BigQueryDestinationOrBuilder>
3100         bigqueryDestinationBuilder_;
3101     /**
3102      *
3103      *
3104      * <pre>
3105      * Only applicable to custom training with tabular Dataset with BigQuery
3106      * source.
3107      * The BigQuery project location where the training data is to be written
3108      * to. In the given project a new dataset is created with name
3109      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3110      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3111      * input data is written into that dataset. In the dataset three
3112      * tables are created, `training`, `validation` and `test`.
3113      * * AIP_DATA_FORMAT = "bigquery".
3114      * * AIP_TRAINING_DATA_URI  =
3115      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3116      * * AIP_VALIDATION_DATA_URI =
3117      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3118      * * AIP_TEST_DATA_URI =
3119      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3120      * </pre>
3121      *
3122      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3123      *
3124      * @return Whether the bigqueryDestination field is set.
3125      */
3126     @java.lang.Override
3127     public boolean hasBigqueryDestination() {
3128       return destinationCase_ == 10;
3129     }
3130     /**
3131      *
3132      *
3133      * <pre>
3134      * Only applicable to custom training with tabular Dataset with BigQuery
3135      * source.
3136      * The BigQuery project location where the training data is to be written
3137      * to. In the given project a new dataset is created with name
3138      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3139      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3140      * input data is written into that dataset. In the dataset three
3141      * tables are created, `training`, `validation` and `test`.
3142      * * AIP_DATA_FORMAT = "bigquery".
3143      * * AIP_TRAINING_DATA_URI  =
3144      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3145      * * AIP_VALIDATION_DATA_URI =
3146      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3147      * * AIP_TEST_DATA_URI =
3148      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3149      * </pre>
3150      *
3151      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3152      *
3153      * @return The bigqueryDestination.
3154      */
3155     @java.lang.Override
3156     public com.google.cloud.aiplatform.v1.BigQueryDestination getBigqueryDestination() {
3157       if (bigqueryDestinationBuilder_ == null) {
3158         if (destinationCase_ == 10) {
3159           return (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_;
3160         }
3161         return com.google.cloud.aiplatform.v1.BigQueryDestination.getDefaultInstance();
3162       } else {
3163         if (destinationCase_ == 10) {
3164           return bigqueryDestinationBuilder_.getMessage();
3165         }
3166         return com.google.cloud.aiplatform.v1.BigQueryDestination.getDefaultInstance();
3167       }
3168     }
3169     /**
3170      *
3171      *
3172      * <pre>
3173      * Only applicable to custom training with tabular Dataset with BigQuery
3174      * source.
3175      * The BigQuery project location where the training data is to be written
3176      * to. In the given project a new dataset is created with name
3177      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3178      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3179      * input data is written into that dataset. In the dataset three
3180      * tables are created, `training`, `validation` and `test`.
3181      * * AIP_DATA_FORMAT = "bigquery".
3182      * * AIP_TRAINING_DATA_URI  =
3183      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3184      * * AIP_VALIDATION_DATA_URI =
3185      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3186      * * AIP_TEST_DATA_URI =
3187      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3188      * </pre>
3189      *
3190      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3191      */
3192     public Builder setBigqueryDestination(
3193         com.google.cloud.aiplatform.v1.BigQueryDestination value) {
3194       if (bigqueryDestinationBuilder_ == null) {
3195         if (value == null) {
3196           throw new NullPointerException();
3197         }
3198         destination_ = value;
3199         onChanged();
3200       } else {
3201         bigqueryDestinationBuilder_.setMessage(value);
3202       }
3203       destinationCase_ = 10;
3204       return this;
3205     }
3206     /**
3207      *
3208      *
3209      * <pre>
3210      * Only applicable to custom training with tabular Dataset with BigQuery
3211      * source.
3212      * The BigQuery project location where the training data is to be written
3213      * to. In the given project a new dataset is created with name
3214      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3215      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3216      * input data is written into that dataset. In the dataset three
3217      * tables are created, `training`, `validation` and `test`.
3218      * * AIP_DATA_FORMAT = "bigquery".
3219      * * AIP_TRAINING_DATA_URI  =
3220      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3221      * * AIP_VALIDATION_DATA_URI =
3222      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3223      * * AIP_TEST_DATA_URI =
3224      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3225      * </pre>
3226      *
3227      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3228      */
3229     public Builder setBigqueryDestination(
3230         com.google.cloud.aiplatform.v1.BigQueryDestination.Builder builderForValue) {
3231       if (bigqueryDestinationBuilder_ == null) {
3232         destination_ = builderForValue.build();
3233         onChanged();
3234       } else {
3235         bigqueryDestinationBuilder_.setMessage(builderForValue.build());
3236       }
3237       destinationCase_ = 10;
3238       return this;
3239     }
3240     /**
3241      *
3242      *
3243      * <pre>
3244      * Only applicable to custom training with tabular Dataset with BigQuery
3245      * source.
3246      * The BigQuery project location where the training data is to be written
3247      * to. In the given project a new dataset is created with name
3248      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3249      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3250      * input data is written into that dataset. In the dataset three
3251      * tables are created, `training`, `validation` and `test`.
3252      * * AIP_DATA_FORMAT = "bigquery".
3253      * * AIP_TRAINING_DATA_URI  =
3254      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3255      * * AIP_VALIDATION_DATA_URI =
3256      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3257      * * AIP_TEST_DATA_URI =
3258      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3259      * </pre>
3260      *
3261      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3262      */
3263     public Builder mergeBigqueryDestination(
3264         com.google.cloud.aiplatform.v1.BigQueryDestination value) {
3265       if (bigqueryDestinationBuilder_ == null) {
3266         if (destinationCase_ == 10
3267             && destination_
3268                 != com.google.cloud.aiplatform.v1.BigQueryDestination.getDefaultInstance()) {
3269           destination_ =
3270               com.google.cloud.aiplatform.v1.BigQueryDestination.newBuilder(
3271                       (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_)
3272                   .mergeFrom(value)
3273                   .buildPartial();
3274         } else {
3275           destination_ = value;
3276         }
3277         onChanged();
3278       } else {
3279         if (destinationCase_ == 10) {
3280           bigqueryDestinationBuilder_.mergeFrom(value);
3281         } else {
3282           bigqueryDestinationBuilder_.setMessage(value);
3283         }
3284       }
3285       destinationCase_ = 10;
3286       return this;
3287     }
3288     /**
3289      *
3290      *
3291      * <pre>
3292      * Only applicable to custom training with tabular Dataset with BigQuery
3293      * source.
3294      * The BigQuery project location where the training data is to be written
3295      * to. In the given project a new dataset is created with name
3296      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3297      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3298      * input data is written into that dataset. In the dataset three
3299      * tables are created, `training`, `validation` and `test`.
3300      * * AIP_DATA_FORMAT = "bigquery".
3301      * * AIP_TRAINING_DATA_URI  =
3302      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3303      * * AIP_VALIDATION_DATA_URI =
3304      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3305      * * AIP_TEST_DATA_URI =
3306      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3307      * </pre>
3308      *
3309      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3310      */
3311     public Builder clearBigqueryDestination() {
3312       if (bigqueryDestinationBuilder_ == null) {
3313         if (destinationCase_ == 10) {
3314           destinationCase_ = 0;
3315           destination_ = null;
3316           onChanged();
3317         }
3318       } else {
3319         if (destinationCase_ == 10) {
3320           destinationCase_ = 0;
3321           destination_ = null;
3322         }
3323         bigqueryDestinationBuilder_.clear();
3324       }
3325       return this;
3326     }
3327     /**
3328      *
3329      *
3330      * <pre>
3331      * Only applicable to custom training with tabular Dataset with BigQuery
3332      * source.
3333      * The BigQuery project location where the training data is to be written
3334      * to. In the given project a new dataset is created with name
3335      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3336      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3337      * input data is written into that dataset. In the dataset three
3338      * tables are created, `training`, `validation` and `test`.
3339      * * AIP_DATA_FORMAT = "bigquery".
3340      * * AIP_TRAINING_DATA_URI  =
3341      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3342      * * AIP_VALIDATION_DATA_URI =
3343      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3344      * * AIP_TEST_DATA_URI =
3345      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3346      * </pre>
3347      *
3348      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3349      */
3350     public com.google.cloud.aiplatform.v1.BigQueryDestination.Builder
3351         getBigqueryDestinationBuilder() {
3352       return getBigqueryDestinationFieldBuilder().getBuilder();
3353     }
3354     /**
3355      *
3356      *
3357      * <pre>
3358      * Only applicable to custom training with tabular Dataset with BigQuery
3359      * source.
3360      * The BigQuery project location where the training data is to be written
3361      * to. In the given project a new dataset is created with name
3362      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3363      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3364      * input data is written into that dataset. In the dataset three
3365      * tables are created, `training`, `validation` and `test`.
3366      * * AIP_DATA_FORMAT = "bigquery".
3367      * * AIP_TRAINING_DATA_URI  =
3368      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3369      * * AIP_VALIDATION_DATA_URI =
3370      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3371      * * AIP_TEST_DATA_URI =
3372      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3373      * </pre>
3374      *
3375      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3376      */
3377     @java.lang.Override
3378     public com.google.cloud.aiplatform.v1.BigQueryDestinationOrBuilder
3379         getBigqueryDestinationOrBuilder() {
3380       if ((destinationCase_ == 10) && (bigqueryDestinationBuilder_ != null)) {
3381         return bigqueryDestinationBuilder_.getMessageOrBuilder();
3382       } else {
3383         if (destinationCase_ == 10) {
3384           return (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_;
3385         }
3386         return com.google.cloud.aiplatform.v1.BigQueryDestination.getDefaultInstance();
3387       }
3388     }
3389     /**
3390      *
3391      *
3392      * <pre>
3393      * Only applicable to custom training with tabular Dataset with BigQuery
3394      * source.
3395      * The BigQuery project location where the training data is to be written
3396      * to. In the given project a new dataset is created with name
3397      * `dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;timestamp-of-training-call&gt;`
3398      * where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
3399      * input data is written into that dataset. In the dataset three
3400      * tables are created, `training`, `validation` and `test`.
3401      * * AIP_DATA_FORMAT = "bigquery".
3402      * * AIP_TRAINING_DATA_URI  =
3403      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.training"
3404      * * AIP_VALIDATION_DATA_URI =
3405      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.validation"
3406      * * AIP_TEST_DATA_URI =
3407      * "bigquery_destination.dataset_&lt;dataset-id&gt;_&lt;annotation-type&gt;_&lt;time&gt;.test"
3408      * </pre>
3409      *
3410      * <code>.google.cloud.aiplatform.v1.BigQueryDestination bigquery_destination = 10;</code>
3411      */
3412     private com.google.protobuf.SingleFieldBuilderV3<
3413             com.google.cloud.aiplatform.v1.BigQueryDestination,
3414             com.google.cloud.aiplatform.v1.BigQueryDestination.Builder,
3415             com.google.cloud.aiplatform.v1.BigQueryDestinationOrBuilder>
3416         getBigqueryDestinationFieldBuilder() {
3417       if (bigqueryDestinationBuilder_ == null) {
3418         if (!(destinationCase_ == 10)) {
3419           destination_ = com.google.cloud.aiplatform.v1.BigQueryDestination.getDefaultInstance();
3420         }
3421         bigqueryDestinationBuilder_ =
3422             new com.google.protobuf.SingleFieldBuilderV3<
3423                 com.google.cloud.aiplatform.v1.BigQueryDestination,
3424                 com.google.cloud.aiplatform.v1.BigQueryDestination.Builder,
3425                 com.google.cloud.aiplatform.v1.BigQueryDestinationOrBuilder>(
3426                 (com.google.cloud.aiplatform.v1.BigQueryDestination) destination_,
3427                 getParentForChildren(),
3428                 isClean());
3429         destination_ = null;
3430       }
3431       destinationCase_ = 10;
3432       onChanged();
3433       return bigqueryDestinationBuilder_;
3434     }
3435 
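    // Illustrative usage sketch (not part of the generated code): routing the exported
    // training, validation and test tables to BigQuery instead of Cloud Storage. This
    // assumes BigQueryDestination exposes setOutputUri(String) for its documented
    // output_uri field; the project ID below is a placeholder.
    //
    //   InputDataConfig config =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setBigqueryDestination(
    //               BigQueryDestination.newBuilder()
    //                   .setOutputUri("bq://my-project")
    //                   .build())
    //           .build();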
3436     private java.lang.Object datasetId_ = "";
3437     /**
3438      *
3439      *
3440      * <pre>
3441      * Required. The ID of the Dataset in the same Project and Location which data
3442      * will be used to train the Model. The Dataset must use schema compatible
3443      * with Model being trained, and what is compatible should be described in the
3444      * used TrainingPipeline's [training_task_definition]
3445      * [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
3446      * For tabular Datasets, all their data is exported to training, to pick
3447      * and choose from.
3448      * </pre>
3449      *
3450      * <code>string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];</code>
3451      *
3452      * @return The datasetId.
3453      */
3454     public java.lang.String getDatasetId() {
3455       java.lang.Object ref = datasetId_;
3456       if (!(ref instanceof java.lang.String)) {
3457         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
3458         java.lang.String s = bs.toStringUtf8();
3459         datasetId_ = s;
3460         return s;
3461       } else {
3462         return (java.lang.String) ref;
3463       }
3464     }
3465     /**
3466      *
3467      *
3468      * <pre>
3469      * Required. The ID of the Dataset in the same Project and Location which data
3470      * will be used to train the Model. The Dataset must use schema compatible
3471      * with Model being trained, and what is compatible should be described in the
3472      * used TrainingPipeline's [training_task_definition]
3473      * [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
3474      * For tabular Datasets, all their data is exported to training, to pick
3475      * and choose from.
3476      * </pre>
3477      *
3478      * <code>string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];</code>
3479      *
3480      * @return The bytes for datasetId.
3481      */
3482     public com.google.protobuf.ByteString getDatasetIdBytes() {
3483       java.lang.Object ref = datasetId_;
3484       if (ref instanceof String) {
3485         com.google.protobuf.ByteString b =
3486             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
3487         datasetId_ = b;
3488         return b;
3489       } else {
3490         return (com.google.protobuf.ByteString) ref;
3491       }
3492     }
3493     /**
3494      *
3495      *
3496      * <pre>
3497      * Required. The ID of the Dataset in the same Project and Location which data
3498      * will be used to train the Model. The Dataset must use schema compatible
3499      * with Model being trained, and what is compatible should be described in the
3500      * used TrainingPipeline's [training_task_definition]
3501      * [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
3502      * For tabular Datasets, all their data is exported to training, to pick
3503      * and choose from.
3504      * </pre>
3505      *
3506      * <code>string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];</code>
3507      *
3508      * @param value The datasetId to set.
3509      * @return This builder for chaining.
3510      */
3511     public Builder setDatasetId(java.lang.String value) {
3512       if (value == null) {
3513         throw new NullPointerException();
3514       }
3515       datasetId_ = value;
3516       bitField0_ |= 0x00000080;
3517       onChanged();
3518       return this;
3519     }
3520     /**
3521      *
3522      *
3523      * <pre>
3524      * Required. The ID of the Dataset in the same Project and Location which data
3525      * will be used to train the Model. The Dataset must use schema compatible
3526      * with Model being trained, and what is compatible should be described in the
3527      * used TrainingPipeline's [training_task_definition]
3528      * [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
3529      * For tabular Datasets, all their data is exported to training, to pick
3530      * and choose from.
3531      * </pre>
3532      *
3533      * <code>string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];</code>
3534      *
3535      * @return This builder for chaining.
3536      */
3537     public Builder clearDatasetId() {
3538       datasetId_ = getDefaultInstance().getDatasetId();
3539       bitField0_ = (bitField0_ & ~0x00000080);
3540       onChanged();
3541       return this;
3542     }
3543     /**
3544      *
3545      *
3546      * <pre>
3547      * Required. The ID of the Dataset in the same Project and Location which data
3548      * will be used to train the Model. The Dataset must use schema compatible
3549      * with Model being trained, and what is compatible should be described in the
3550      * used TrainingPipeline's [training_task_definition]
3551      * [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
3552      * For tabular Datasets, all their data is exported to training, to pick
3553      * and choose from.
3554      * </pre>
3555      *
3556      * <code>string dataset_id = 1 [(.google.api.field_behavior) = REQUIRED];</code>
3557      *
3558      * @param value The bytes for datasetId to set.
3559      * @return This builder for chaining.
3560      */
3561     public Builder setDatasetIdBytes(com.google.protobuf.ByteString value) {
3562       if (value == null) {
3563         throw new NullPointerException();
3564       }
3565       checkByteStringIsUtf8(value);
3566       datasetId_ = value;
3567       bitField0_ |= 0x00000080;
3568       onChanged();
3569       return this;
3570     }
3571 
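    // Illustrative usage sketch (not part of the generated code): dataset_id is the
    // numeric ID of an existing Vertex AI Dataset in the same project and location,
    // not its full resource name. The value below is a placeholder.
    //
    //   InputDataConfig.Builder builder =
    //       InputDataConfig.newBuilder().setDatasetId("1234567890");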
3572     private java.lang.Object annotationsFilter_ = "";
3573     /**
3574      *
3575      *
3576      * <pre>
3577      * Applicable only to Datasets that have DataItems and Annotations.
3578      * A filter on Annotations of the Dataset. Only Annotations that both
3579      * match this filter and belong to DataItems not ignored by the split method
3580      * are used in respectively training, validation or test role, depending on
3581      * the role of the DataItem they are on (for the auto-assigned that role is
3582      * decided by Vertex AI). A filter with same syntax as the one used in
3583      * [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
3584      * may be used, but note here it filters across all Annotations of the
3585      * Dataset, and not just within a single DataItem.
3586      * </pre>
3587      *
3588      * <code>string annotations_filter = 6;</code>
3589      *
3590      * @return The annotationsFilter.
3591      */
3592     public java.lang.String getAnnotationsFilter() {
3593       java.lang.Object ref = annotationsFilter_;
3594       if (!(ref instanceof java.lang.String)) {
3595         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
3596         java.lang.String s = bs.toStringUtf8();
3597         annotationsFilter_ = s;
3598         return s;
3599       } else {
3600         return (java.lang.String) ref;
3601       }
3602     }
3603     /**
3604      *
3605      *
3606      * <pre>
3607      * Applicable only to Datasets that have DataItems and Annotations.
3608      * A filter on Annotations of the Dataset. Only Annotations that both
3609      * match this filter and belong to DataItems not ignored by the split method
3610      * are used in respectively training, validation or test role, depending on
3611      * the role of the DataItem they are on (for the auto-assigned that role is
3612      * decided by Vertex AI). A filter with same syntax as the one used in
3613      * [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
3614      * may be used, but note here it filters across all Annotations of the
3615      * Dataset, and not just within a single DataItem.
3616      * </pre>
3617      *
3618      * <code>string annotations_filter = 6;</code>
3619      *
3620      * @return The bytes for annotationsFilter.
3621      */
3622     public com.google.protobuf.ByteString getAnnotationsFilterBytes() {
3623       java.lang.Object ref = annotationsFilter_;
3624       if (ref instanceof String) {
3625         com.google.protobuf.ByteString b =
3626             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
3627         annotationsFilter_ = b;
3628         return b;
3629       } else {
3630         return (com.google.protobuf.ByteString) ref;
3631       }
3632     }
3633     /**
3634      *
3635      *
3636      * <pre>
3637      * Applicable only to Datasets that have DataItems and Annotations.
3638      * A filter on Annotations of the Dataset. Only Annotations that both
3639      * match this filter and belong to DataItems not ignored by the split method
3640      * are used in respectively training, validation or test role, depending on
3641      * the role of the DataItem they are on (for the auto-assigned that role is
3642      * decided by Vertex AI). A filter with same syntax as the one used in
3643      * [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
3644      * may be used, but note here it filters across all Annotations of the
3645      * Dataset, and not just within a single DataItem.
3646      * </pre>
3647      *
3648      * <code>string annotations_filter = 6;</code>
3649      *
3650      * @param value The annotationsFilter to set.
3651      * @return This builder for chaining.
3652      */
3653     public Builder setAnnotationsFilter(java.lang.String value) {
3654       if (value == null) {
3655         throw new NullPointerException();
3656       }
3657       annotationsFilter_ = value;
3658       bitField0_ |= 0x00000100;
3659       onChanged();
3660       return this;
3661     }
3662     /**
3663      *
3664      *
3665      * <pre>
3666      * Applicable only to Datasets that have DataItems and Annotations.
3667      * A filter on Annotations of the Dataset. Only Annotations that both
3668      * match this filter and belong to DataItems not ignored by the split method
3669      * are used in respectively training, validation or test role, depending on
3670      * the role of the DataItem they are on (for the auto-assigned that role is
3671      * decided by Vertex AI). A filter with same syntax as the one used in
3672      * [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
3673      * may be used, but note here it filters across all Annotations of the
3674      * Dataset, and not just within a single DataItem.
3675      * </pre>
3676      *
3677      * <code>string annotations_filter = 6;</code>
3678      *
3679      * @return This builder for chaining.
3680      */
3681     public Builder clearAnnotationsFilter() {
3682       annotationsFilter_ = getDefaultInstance().getAnnotationsFilter();
3683       bitField0_ = (bitField0_ & ~0x00000100);
3684       onChanged();
3685       return this;
3686     }
3687     /**
3688      *
3689      *
3690      * <pre>
3691      * Applicable only to Datasets that have DataItems and Annotations.
3692      * A filter on Annotations of the Dataset. Only Annotations that both
3693      * match this filter and belong to DataItems not ignored by the split method
3694      * are used in respectively training, validation or test role, depending on
3695      * the role of the DataItem they are on (for the auto-assigned that role is
3696      * decided by Vertex AI). A filter with same syntax as the one used in
3697      * [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
3698      * may be used, but note here it filters across all Annotations of the
3699      * Dataset, and not just within a single DataItem.
3700      * </pre>
3701      *
3702      * <code>string annotations_filter = 6;</code>
3703      *
3704      * @param value The bytes for annotationsFilter to set.
3705      * @return This builder for chaining.
3706      */
3707     public Builder setAnnotationsFilterBytes(com.google.protobuf.ByteString value) {
3708       if (value == null) {
3709         throw new NullPointerException();
3710       }
3711       checkByteStringIsUtf8(value);
3712       annotationsFilter_ = value;
3713       bitField0_ |= 0x00000100;
3714       onChanged();
3715       return this;
3716     }
3717 
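    // Illustrative usage sketch (not part of the generated code): annotations_filter
    // accepts the same filter syntax as DatasetService.ListAnnotations, applied across
    // all Annotations of the Dataset. The filter expression below is a hypothetical
    // example, not a guaranteed-valid filter.
    //
    //   builder.setAnnotationsFilter(
    //       "labels.aiplatform.googleapis.com/annotation_set_name=1234567890");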
3718     private java.lang.Object annotationSchemaUri_ = "";
3719     /**
3720      *
3721      *
3722      * <pre>
3723      * Applicable only to custom training with Datasets that have DataItems and
3724      * Annotations.
3725      * Cloud Storage URI that points to a YAML file describing the annotation
3726      * schema. The schema is defined as an OpenAPI 3.0.2 [Schema
3727      * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
3728      * The schema files that can be used here are found in
3729      * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
3730      * chosen schema must be consistent with
3731      * [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
3732      * Dataset specified by
3733      * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
3734      * Only Annotations that both match this schema and belong to DataItems not
3735      * ignored by the split method are used in respectively training, validation
3736      * or test role, depending on the role of the DataItem they are on.
3737      * When used in conjunction with
3738      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
3739      * the Annotations used for training are filtered by both
3740      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
3741      * and
3742      * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
3743      * </pre>
3744      *
3745      * <code>string annotation_schema_uri = 9;</code>
3746      *
3747      * @return The annotationSchemaUri.
3748      */
3749     public java.lang.String getAnnotationSchemaUri() {
3750       java.lang.Object ref = annotationSchemaUri_;
3751       if (!(ref instanceof java.lang.String)) {
3752         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
3753         java.lang.String s = bs.toStringUtf8();
3754         annotationSchemaUri_ = s;
3755         return s;
3756       } else {
3757         return (java.lang.String) ref;
3758       }
3759     }
3760     /**
3761      *
3762      *
3763      * <pre>
3764      * Applicable only to custom training with Datasets that have DataItems and
3765      * Annotations.
3766      * Cloud Storage URI that points to a YAML file describing the annotation
3767      * schema. The schema is defined as an OpenAPI 3.0.2 [Schema
3768      * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
3769      * The schema files that can be used here are found in
3770      * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
3771      * chosen schema must be consistent with
3772      * [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
3773      * Dataset specified by
3774      * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
3775      * Only Annotations that both match this schema and belong to DataItems not
3776      * ignored by the split method are used in respectively training, validation
3777      * or test role, depending on the role of the DataItem they are on.
3778      * When used in conjunction with
3779      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
3780      * the Annotations used for training are filtered by both
3781      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
3782      * and
3783      * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
3784      * </pre>
3785      *
3786      * <code>string annotation_schema_uri = 9;</code>
3787      *
3788      * @return The bytes for annotationSchemaUri.
3789      */
3790     public com.google.protobuf.ByteString getAnnotationSchemaUriBytes() {
3791       java.lang.Object ref = annotationSchemaUri_;
3792       if (ref instanceof String) {
3793         com.google.protobuf.ByteString b =
3794             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
3795         annotationSchemaUri_ = b;
3796         return b;
3797       } else {
3798         return (com.google.protobuf.ByteString) ref;
3799       }
3800     }
3801     /**
3802      *
3803      *
3804      * <pre>
3805      * Applicable only to custom training with Datasets that have DataItems and
3806      * Annotations.
3807      * Cloud Storage URI that points to a YAML file describing the annotation
3808      * schema. The schema is defined as an OpenAPI 3.0.2 [Schema
3809      * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
3810      * The schema files that can be used here are found in
3811      * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
3812      * chosen schema must be consistent with
3813      * [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
3814      * Dataset specified by
3815      * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
3816      * Only Annotations that both match this schema and belong to DataItems not
3817      * ignored by the split method are used in respectively training, validation
3818      * or test role, depending on the role of the DataItem they are on.
3819      * When used in conjunction with
3820      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
3821      * the Annotations used for training are filtered by both
3822      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
3823      * and
3824      * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
3825      * </pre>
3826      *
3827      * <code>string annotation_schema_uri = 9;</code>
3828      *
3829      * @param value The annotationSchemaUri to set.
3830      * @return This builder for chaining.
3831      */
3832     public Builder setAnnotationSchemaUri(java.lang.String value) {
3833       if (value == null) {
3834         throw new NullPointerException();
3835       }
3836       annotationSchemaUri_ = value;
3837       bitField0_ |= 0x00000200;
3838       onChanged();
3839       return this;
3840     }
3841     /**
3842      *
3843      *
3844      * <pre>
3845      * Applicable only to custom training with Datasets that have DataItems and
3846      * Annotations.
3847      * Cloud Storage URI that points to a YAML file describing the annotation
3848      * schema. The schema is defined as an OpenAPI 3.0.2 [Schema
3849      * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
3850      * The schema files that can be used here are found in
3851      * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
3852      * chosen schema must be consistent with
3853      * [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
3854      * Dataset specified by
3855      * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
3856      * Only Annotations that both match this schema and belong to DataItems not
3857      * ignored by the split method are used in respectively training, validation
3858      * or test role, depending on the role of the DataItem they are on.
3859      * When used in conjunction with
3860      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
3861      * the Annotations used for training are filtered by both
3862      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
3863      * and
3864      * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
3865      * </pre>
3866      *
3867      * <code>string annotation_schema_uri = 9;</code>
3868      *
3869      * @return This builder for chaining.
3870      */
3871     public Builder clearAnnotationSchemaUri() {
3872       annotationSchemaUri_ = getDefaultInstance().getAnnotationSchemaUri();
3873       bitField0_ = (bitField0_ & ~0x00000200);
3874       onChanged();
3875       return this;
3876     }
3877     /**
3878      *
3879      *
3880      * <pre>
3881      * Applicable only to custom training with Datasets that have DataItems and
3882      * Annotations.
3883      * Cloud Storage URI that points to a YAML file describing the annotation
3884      * schema. The schema is defined as an OpenAPI 3.0.2 [Schema
3885      * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
3886      * The schema files that can be used here are found in
3887      * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the
3888      * chosen schema must be consistent with
3889      * [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] of the
3890      * Dataset specified by
3891      * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
3892      * Only Annotations that both match this schema and belong to DataItems not
3893      * ignored by the split method are used in respectively training, validation
3894      * or test role, depending on the role of the DataItem they are on.
3895      * When used in conjunction with
3896      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
3897      * the Annotations used for training are filtered by both
3898      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
3899      * and
3900      * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
3901      * </pre>
3902      *
3903      * <code>string annotation_schema_uri = 9;</code>
3904      *
3905      * @param value The bytes for annotationSchemaUri to set.
3906      * @return This builder for chaining.
3907      */
3908     public Builder setAnnotationSchemaUriBytes(com.google.protobuf.ByteString value) {
3909       if (value == null) {
3910         throw new NullPointerException();
3911       }
3912       checkByteStringIsUtf8(value);
3913       annotationSchemaUri_ = value;
3914       bitField0_ |= 0x00000200;
3915       onChanged();
3916       return this;
3917     }
3918 
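    // Illustrative usage sketch (not part of the generated code): annotation_schema_uri
    // points at one of the published schema files under
    // gs://google-cloud-aiplatform/schema/dataset/annotation/ and must be consistent
    // with the Dataset's metadata schema. The exact file name below is an assumed example.
    //
    //   builder.setAnnotationSchemaUri(
    //       "gs://google-cloud-aiplatform/schema/dataset/annotation/image_classification_1.0.0.yaml");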
3919     private java.lang.Object savedQueryId_ = "";
3920     /**
3921      *
3922      *
3923      * <pre>
3924      * Only applicable to Datasets that have SavedQueries.
3925      * The ID of a SavedQuery (annotation set) under the Dataset specified by
3926      * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
3927      * for filtering Annotations for training.
3928      * Only Annotations that are associated with this SavedQuery are used in
3929      * respectively training. When used in conjunction with
3930      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
3931      * the Annotations used for training are filtered by both
3932      * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
3933      * and
3934      * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
3935      * Only one of
3936      * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
3937      * and
3938      * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
3939      * should be specified as both of them represent the same thing: problem type.
3940      * </pre>
3941      *
3942      * <code>string saved_query_id = 7;</code>
3943      *
3944      * @return The savedQueryId.
3945      */
getSavedQueryId()3946     public java.lang.String getSavedQueryId() {
3947       java.lang.Object ref = savedQueryId_;
3948       if (!(ref instanceof java.lang.String)) {
3949         com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
3950         java.lang.String s = bs.toStringUtf8();
3951         savedQueryId_ = s;
3952         return s;
3953       } else {
3954         return (java.lang.String) ref;
3955       }
3956     }
    /**
     *
     *
     * <pre>
     * Only applicable to Datasets that have SavedQueries.
     * The ID of a SavedQuery (annotation set) under the Dataset specified by
     * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
     * for filtering Annotations for training.
     * Only Annotations that are associated with this SavedQuery are used in
     * respectively training. When used in conjunction with
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
     * the Annotations used for training are filtered by both
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
     * Only one of
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
     * should be specified as both of them represent the same thing: problem type.
     * </pre>
     *
     * <code>string saved_query_id = 7;</code>
     *
     * @return The bytes for savedQueryId.
     */
    public com.google.protobuf.ByteString getSavedQueryIdBytes() {
      java.lang.Object ref = savedQueryId_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        savedQueryId_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Only applicable to Datasets that have SavedQueries.
     * The ID of a SavedQuery (annotation set) under the Dataset specified by
     * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
     * for filtering Annotations for training.
     * Only Annotations that are associated with this SavedQuery are used in
     * respectively training. When used in conjunction with
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
     * the Annotations used for training are filtered by both
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
     * Only one of
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
     * should be specified as both of them represent the same thing: problem type.
     * </pre>
     *
     * <code>string saved_query_id = 7;</code>
     *
     * @param value The savedQueryId to set.
     * @return This builder for chaining.
     */
    public Builder setSavedQueryId(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      savedQueryId_ = value;
      bitField0_ |= 0x00000400;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Only applicable to Datasets that have SavedQueries.
     * The ID of a SavedQuery (annotation set) under the Dataset specified by
     * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
     * for filtering Annotations for training.
     * Only Annotations that are associated with this SavedQuery are used in
     * respectively training. When used in conjunction with
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
     * the Annotations used for training are filtered by both
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
     * Only one of
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
     * should be specified as both of them represent the same thing: problem type.
     * </pre>
     *
     * <code>string saved_query_id = 7;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSavedQueryId() {
      savedQueryId_ = getDefaultInstance().getSavedQueryId();
      bitField0_ = (bitField0_ & ~0x00000400);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Only applicable to Datasets that have SavedQueries.
     * The ID of a SavedQuery (annotation set) under the Dataset specified by
     * [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id] used
     * for filtering Annotations for training.
     * Only Annotations that are associated with this SavedQuery are used in
     * respectively training. When used in conjunction with
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
     * the Annotations used for training are filtered by both
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter].
     * Only one of
     * [saved_query_id][google.cloud.aiplatform.v1.InputDataConfig.saved_query_id]
     * and
     * [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]
     * should be specified as both of them represent the same thing: problem type.
     * </pre>
     *
     * <code>string saved_query_id = 7;</code>
     *
     * @param value The bytes for savedQueryId to set.
     * @return This builder for chaining.
     */
    public Builder setSavedQueryIdBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      savedQueryId_ = value;
      bitField0_ |= 0x00000400;
      onChanged();
      return this;
    }
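
    // Editor's note (hand-added sketch, not generated code): per the field docs above,
    // saved_query_id and annotation_schema_uri both identify the problem type, so only one
    // of them should be set on a given config. The SavedQuery ID literal below is an
    // assumed example value.
    //
    //   InputDataConfig.Builder configBuilder =
    //       InputDataConfig.newBuilder()
    //           .setDatasetId("1234567890")
    //           .setSavedQueryId("6789012345"); // instead of setAnnotationSchemaUri(...)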

    private boolean persistMlUseAssignment_;
    /**
     *
     *
     * <pre>
     * Whether to persist the ML use assignment to data item system labels.
     * </pre>
     *
     * <code>bool persist_ml_use_assignment = 11;</code>
     *
     * @return The persistMlUseAssignment.
     */
    @java.lang.Override
    public boolean getPersistMlUseAssignment() {
      return persistMlUseAssignment_;
    }
    /**
     *
     *
     * <pre>
     * Whether to persist the ML use assignment to data item system labels.
     * </pre>
     *
     * <code>bool persist_ml_use_assignment = 11;</code>
     *
     * @param value The persistMlUseAssignment to set.
     * @return This builder for chaining.
     */
    public Builder setPersistMlUseAssignment(boolean value) {

      persistMlUseAssignment_ = value;
      bitField0_ |= 0x00000800;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Whether to persist the ML use assignment to data item system labels.
     * </pre>
     *
     * <code>bool persist_ml_use_assignment = 11;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearPersistMlUseAssignment() {
      bitField0_ = (bitField0_ & ~0x00000800);
      persistMlUseAssignment_ = false;
      onChanged();
      return this;
    }
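
    // Editor's note (hand-added sketch, not generated code): persist_ml_use_assignment is a
    // plain bool, so it can be toggled on an existing builder; clearPersistMlUseAssignment()
    // simply resets it to the proto3 default of false.
    //
    //   configBuilder.setPersistMlUseAssignment(true);  // record train/validation/test roles
    //   configBuilder.clearPersistMlUseAssignment();    // back to the default (false)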

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.aiplatform.v1.InputDataConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.aiplatform.v1.InputDataConfig)
  private static final com.google.cloud.aiplatform.v1.InputDataConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.aiplatform.v1.InputDataConfig();
  }

  public static com.google.cloud.aiplatform.v1.InputDataConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<InputDataConfig> PARSER =
      new com.google.protobuf.AbstractParser<InputDataConfig>() {
        @java.lang.Override
        public InputDataConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<InputDataConfig> parser() {
    return PARSER;
  }
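
  // Editor's note (hand-added sketch, not generated code): parser() exposes the standard
  // protobuf Parser, so serialized bytes can be turned back into an InputDataConfig. The
  // byte array here is assumed to hold a previously serialized message.
  //
  //   byte[] bytes = existingConfig.toByteArray();
  //   InputDataConfig restored = InputDataConfig.parser().parseFrom(bytes);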

  @java.lang.Override
  public com.google.protobuf.Parser<InputDataConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.aiplatform.v1.InputDataConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
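
// Editor's note (hand-added sketch, not generated code): a rough end-to-end illustration of
// building an InputDataConfig and reading it back. The 80/10/10 FractionSplit and the literal
// dataset ID are assumptions for the example, not defaults of the API; parseFrom may throw
// InvalidProtocolBufferException for malformed input.
//
//   InputDataConfig config =
//       InputDataConfig.newBuilder()
//           .setDatasetId("1234567890")
//           .setFractionSplit(
//               FractionSplit.newBuilder()
//                   .setTrainingFraction(0.8)
//                   .setValidationFraction(0.1)
//                   .setTestFraction(0.1))
//           .build();
//   InputDataConfig roundTripped = InputDataConfig.parseFrom(config.toByteArray());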