• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2020 Google LLC
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     https://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 // Generated by the protocol buffer compiler.  DO NOT EDIT!
17 // source: google/cloud/asset/v1/asset_service.proto
18 
19 package com.google.cloud.asset.v1;
20 
21 /**
22  *
23  *
24  * <pre>
25  * A BigQuery destination for exporting assets to.
26  * </pre>
27  *
28  * Protobuf type {@code google.cloud.asset.v1.BigQueryDestination}
29  */
30 public final class BigQueryDestination extends com.google.protobuf.GeneratedMessageV3
31     implements
32     // @@protoc_insertion_point(message_implements:google.cloud.asset.v1.BigQueryDestination)
33     BigQueryDestinationOrBuilder {
34   private static final long serialVersionUID = 0L;
  // Use BigQueryDestination.newBuilder() to construct.
  // Invoked by Builder.buildPartial(); private so instances are created only
  // through the generated Builder.
  private BigQueryDestination(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }
39 
  // No-arg constructor used by newInstance() and for the default instance.
  // Initializes string fields to the proto3 default (empty string).
  private BigQueryDestination() {
    dataset_ = "";
    table_ = "";
  }
44 
  // Called reflectively by the protobuf runtime to create fresh instances.
  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new BigQueryDestination();
  }
50 
  // Returns fields that were present on the wire but are unknown to this
  // generated class (e.g. from a newer schema version).
  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }
55 
  // Descriptor for google.cloud.asset.v1.BigQueryDestination, owned by the
  // file-level AssetServiceProto holder class.
  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.asset.v1.AssetServiceProto
        .internal_static_google_cloud_asset_v1_BigQueryDestination_descriptor;
  }
60 
  // Supplies the reflective field-accessor table used by the protobuf runtime
  // to read/write fields of this message and its Builder.
  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.asset.v1.AssetServiceProto
        .internal_static_google_cloud_asset_v1_BigQueryDestination_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.asset.v1.BigQueryDestination.class,
            com.google.cloud.asset.v1.BigQueryDestination.Builder.class);
  }
70 
  public static final int DATASET_FIELD_NUMBER = 1;

  // Holds either a java.lang.String or a com.google.protobuf.ByteString; the
  // getters lazily convert and cache whichever representation is requested
  // (standard generated-code pattern; volatile makes the cached write visible).
  @SuppressWarnings("serial")
  private volatile java.lang.Object dataset_ = "";
  /**
   * <pre>
   * Required. The BigQuery dataset in format
   * "projects/projectId/datasets/datasetId", to which the snapshot result
   * should be exported. If this dataset does not exist, the export call returns
   * an INVALID_ARGUMENT error. Setting the `contentType` for `exportAssets`
   * determines the
   * [schema](/asset-inventory/docs/exporting-to-bigquery#bigquery-schema)
   * of the BigQuery table. Setting `separateTablesPerAssetType` to `TRUE` also
   * influences the schema.
   * </pre>
   *
   * <code>string dataset = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The dataset.
   */
  @java.lang.Override
  public java.lang.String getDataset() {
    java.lang.Object ref = dataset_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      // Cache the decoded String so subsequent calls avoid re-decoding.
      dataset_ = s;
      return s;
    }
  }
  /**
   * UTF-8 bytes view of the dataset field; see {@link #getDataset()} for the
   * field semantics.
   *
   * <code>string dataset = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The bytes for dataset.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getDatasetBytes() {
    java.lang.Object ref = dataset_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      // Cache the encoded ByteString so subsequent calls avoid re-encoding.
      dataset_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
135 
  public static final int TABLE_FIELD_NUMBER = 2;

  // Holds either a java.lang.String or a com.google.protobuf.ByteString; the
  // getters lazily convert and cache whichever representation is requested.
  @SuppressWarnings("serial")
  private volatile java.lang.Object table_ = "";
  /**
   * <pre>
   * Required. The BigQuery table to which the snapshot result should be
   * written. If this table does not exist, a new table with the given name
   * will be created.
   * </pre>
   *
   * <code>string table = 2 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The table.
   */
  @java.lang.Override
  public java.lang.String getTable() {
    java.lang.Object ref = table_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      // Cache the decoded String so subsequent calls avoid re-decoding.
      table_ = s;
      return s;
    }
  }
  /**
   * UTF-8 bytes view of the table field; see {@link #getTable()} for the field
   * semantics.
   *
   * <code>string table = 2 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The bytes for table.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getTableBytes() {
    java.lang.Object ref = table_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      // Cache the encoded ByteString so subsequent calls avoid re-encoding.
      table_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
190 
  public static final int FORCE_FIELD_NUMBER = 3;
  private boolean force_ = false;
  /**
   * <pre>
   * If the destination table already exists and this flag is `TRUE`, the
   * table will be overwritten by the contents of assets snapshot. If the flag
   * is `FALSE` or unset and the destination table already exists, the export
   * call returns an INVALID_ARGUMENT error.
   * </pre>
   *
   * <code>bool force = 3;</code>
   *
   * @return The force.
   */
  @java.lang.Override
  public boolean getForce() {
    return force_;
  }
211 
  public static final int PARTITION_SPEC_FIELD_NUMBER = 4;
  // null means "unset" (proto3 message-field presence); see hasPartitionSpec().
  private com.google.cloud.asset.v1.PartitionSpec partitionSpec_;
  /**
   * <pre>
   * [partition_spec] determines whether to export to partitioned table(s) and
   * how to partition the data.
   * If [partition_spec] is unset or [partition_spec.partition_key] is unset or
   * `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
   * non-partitioned table(s). [force] will decide whether to overwrite existing
   * table(s).
   * If [partition_spec] is specified. First, the snapshot results will be
   * written to partitioned table(s) with two additional timestamp columns,
   * readTime and requestTime, one of which will be the partition key. Secondly,
   * in the case when any destination table already exists, it will first try to
   * update existing table's schema as necessary by appending additional
   * columns. Then, if [force] is `TRUE`, the corresponding partition will be
   * overwritten by the snapshot results (data in different partitions will
   * remain intact); if [force] is unset or `FALSE`, it will append the data. An
   * error will be returned if the schema update or data append fails.
   * </pre>
   *
   * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
   *
   * @return Whether the partitionSpec field is set.
   */
  @java.lang.Override
  public boolean hasPartitionSpec() {
    return partitionSpec_ != null;
  }
  /**
   * Returns the partition spec, or the default (empty) instance when unset;
   * see {@link #hasPartitionSpec()} for the full field semantics.
   *
   * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
   *
   * @return The partitionSpec.
   */
  @java.lang.Override
  public com.google.cloud.asset.v1.PartitionSpec getPartitionSpec() {
    return partitionSpec_ == null
        ? com.google.cloud.asset.v1.PartitionSpec.getDefaultInstance()
        : partitionSpec_;
  }
  /**
   * Read-only view of the partition spec; equivalent to
   * {@link #getPartitionSpec()} on an immutable message.
   *
   * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
   */
  @java.lang.Override
  public com.google.cloud.asset.v1.PartitionSpecOrBuilder getPartitionSpecOrBuilder() {
    return partitionSpec_ == null
        ? com.google.cloud.asset.v1.PartitionSpec.getDefaultInstance()
        : partitionSpec_;
  }
303 
  public static final int SEPARATE_TABLES_PER_ASSET_TYPE_FIELD_NUMBER = 5;
  private boolean separateTablesPerAssetType_ = false;
  /**
   * <pre>
   * If this flag is `TRUE`, the snapshot results will be written to one or
   * multiple tables, each of which contains results of one asset type. The
   * [force] and [partition_spec] fields will apply to each of them.
   * Field [table] will be concatenated with "_" and the asset type names (see
   * https://cloud.google.com/asset-inventory/docs/supported-asset-types for
   * supported asset types) to construct per-asset-type table names, in which
   * all non-alphanumeric characters like "." and "/" will be substituted by
   * "_". Example: if field [table] is "mytable" and snapshot results
   * contain "storage.googleapis.com/Bucket" assets, the corresponding table
   * name will be "mytable_storage_googleapis_com_Bucket". If any of these
   * tables does not exist, a new table with the concatenated name will be
   * created.
   * When [content_type] in the ExportAssetsRequest is `RESOURCE`, the schema of
   * each table will include RECORD-type columns mapped to the nested fields in
   * the Asset.resource.data field of that asset type (up to the 15 nested level
   * BigQuery supports
   * (https://cloud.google.com/bigquery/docs/nested-repeated#limitations)). The
   * fields in &gt;15 nested levels will be stored in JSON format string as a child
   * column of its parent RECORD column.
   * If error occurs when exporting to any table, the whole export call will
   * return an error but the export results that already succeed will persist.
   * Example: if exporting to table_type_A succeeds when exporting to
   * table_type_B fails during one export call, the results in table_type_A will
   * persist and there will not be partial results persisting in a table.
   * </pre>
   *
   * <code>bool separate_tables_per_asset_type = 5;</code>
   *
   * @return The separateTablesPerAssetType.
   */
  @java.lang.Override
  public boolean getSeparateTablesPerAssetType() {
    return separateTablesPerAssetType_;
  }
344 
  // Memoized result of isInitialized(): -1 = not yet computed, 0 = false,
  // 1 = true.
  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    // This message has no required fields or sub-messages to validate, so it
    // is always initialized.
    memoizedIsInitialized = 1;
    return true;
  }
356 
  // Serializes this message in ascending field-number order. Per proto3
  // semantics, fields equal to their default value are not written.
  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(dataset_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, dataset_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(table_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 2, table_);
    }
    if (force_ != false) {
      output.writeBool(3, force_);
    }
    if (partitionSpec_ != null) {
      output.writeMessage(4, getPartitionSpec());
    }
    if (separateTablesPerAssetType_ != false) {
      output.writeBool(5, separateTablesPerAssetType_);
    }
    // Unknown fields are preserved and re-emitted on the wire.
    getUnknownFields().writeTo(output);
  }
376 
  // Computes the serialized byte size, mirroring writeTo()'s skip-defaults
  // logic exactly; the result is memoized in memoizedSize (-1 = not computed).
  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(dataset_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, dataset_);
    }
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(table_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, table_);
    }
    if (force_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, force_);
    }
    if (partitionSpec_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getPartitionSpec());
    }
    if (separateTablesPerAssetType_ != false) {
      size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, separateTablesPerAssetType_);
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }
402 
  // Field-by-field equality, including unknown fields. partition_spec is only
  // compared when both sides agree it is set (message-field presence).
  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.asset.v1.BigQueryDestination)) {
      return super.equals(obj);
    }
    com.google.cloud.asset.v1.BigQueryDestination other =
        (com.google.cloud.asset.v1.BigQueryDestination) obj;

    if (!getDataset().equals(other.getDataset())) return false;
    if (!getTable().equals(other.getTable())) return false;
    if (getForce() != other.getForce()) return false;
    if (hasPartitionSpec() != other.hasPartitionSpec()) return false;
    if (hasPartitionSpec()) {
      if (!getPartitionSpec().equals(other.getPartitionSpec())) return false;
    }
    if (getSeparateTablesPerAssetType() != other.getSeparateTablesPerAssetType()) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }
425 
  // Memoized hash consistent with equals(): mixes the descriptor, each set
  // field tagged by its field number, and the unknown fields. The multiplier
  // constants (19/37/53/29) are fixed by the protobuf code generator.
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + DATASET_FIELD_NUMBER;
    hash = (53 * hash) + getDataset().hashCode();
    hash = (37 * hash) + TABLE_FIELD_NUMBER;
    hash = (53 * hash) + getTable().hashCode();
    hash = (37 * hash) + FORCE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getForce());
    if (hasPartitionSpec()) {
      hash = (37 * hash) + PARTITION_SPEC_FIELD_NUMBER;
      hash = (53 * hash) + getPartitionSpec().hashCode();
    }
    hash = (37 * hash) + SEPARATE_TABLES_PER_ASSET_TYPE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSeparateTablesPerAssetType());
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }
449 
  // -------------------------------------------------------------------------
  // Static parse helpers. Byte-based overloads delegate to PARSER directly;
  // stream-based overloads go through the GeneratedMessageV3 IO helpers, which
  // normalize exception handling for java.io streams.
  // -------------------------------------------------------------------------
  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  // Delimited variants read a varint length prefix before the message payload.
  public static com.google.cloud.asset.v1.BigQueryDestination parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.asset.v1.BigQueryDestination parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }
521 
  // Instance hook used by the protobuf runtime; delegates to the static
  // factory.
  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }
526 
  // Creates an empty builder (the default instance yields a fresh Builder).
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }
530 
newBuilder(com.google.cloud.asset.v1.BigQueryDestination prototype)531   public static Builder newBuilder(com.google.cloud.asset.v1.BigQueryDestination prototype) {
532     return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
533   }
534 
535   @java.lang.Override
toBuilder()536   public Builder toBuilder() {
537     return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
538   }
539 
540   @java.lang.Override
newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent)541   protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
542     Builder builder = new Builder(parent);
543     return builder;
544   }
545   /**
546    *
547    *
548    * <pre>
549    * A BigQuery destination for exporting assets to.
550    * </pre>
551    *
552    * Protobuf type {@code google.cloud.asset.v1.BigQueryDestination}
553    */
554   public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
555       implements
556       // @@protoc_insertion_point(builder_implements:google.cloud.asset.v1.BigQueryDestination)
557       com.google.cloud.asset.v1.BigQueryDestinationOrBuilder {
    // Descriptor for google.cloud.asset.v1.BigQueryDestination (same as the
    // outer class's descriptor).
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.asset.v1.AssetServiceProto
          .internal_static_google_cloud_asset_v1_BigQueryDestination_descriptor;
    }
562 
    // Reflective field-accessor table shared with the outer message class.
    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.asset.v1.AssetServiceProto
          .internal_static_google_cloud_asset_v1_BigQueryDestination_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.asset.v1.BigQueryDestination.class,
              com.google.cloud.asset.v1.BigQueryDestination.Builder.class);
    }
572 
    // Construct using com.google.cloud.asset.v1.BigQueryDestination.newBuilder()
    private Builder() {}

    // Parent-aware constructor used for nested-builder change propagation.
    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }
579 
    // Resets every field to its proto3 default and clears all field-presence
    // bits; also disposes any live partition_spec sub-builder.
    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      dataset_ = "";
      table_ = "";
      force_ = false;
      partitionSpec_ = null;
      if (partitionSpecBuilder_ != null) {
        partitionSpecBuilder_.dispose();
        partitionSpecBuilder_ = null;
      }
      separateTablesPerAssetType_ = false;
      return this;
    }
595 
    // Descriptor of the message type this builder produces.
    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.asset.v1.AssetServiceProto
          .internal_static_google_cloud_asset_v1_BigQueryDestination_descriptor;
    }
601 
    // Singleton default (all-fields-default) instance of the message type.
    @java.lang.Override
    public com.google.cloud.asset.v1.BigQueryDestination getDefaultInstanceForType() {
      return com.google.cloud.asset.v1.BigQueryDestination.getDefaultInstance();
    }
606 
607     @java.lang.Override
build()608     public com.google.cloud.asset.v1.BigQueryDestination build() {
609       com.google.cloud.asset.v1.BigQueryDestination result = buildPartial();
610       if (!result.isInitialized()) {
611         throw newUninitializedMessageException(result);
612       }
613       return result;
614     }
615 
    // Builds the message without checking initialization; only fields whose
    // presence bits are set are copied (see buildPartial0).
    @java.lang.Override
    public com.google.cloud.asset.v1.BigQueryDestination buildPartial() {
      com.google.cloud.asset.v1.BigQueryDestination result =
          new com.google.cloud.asset.v1.BigQueryDestination(this);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }
626 
    // Copies only the fields whose bits are set in bitField0_ into the result,
    // leaving the rest at their proto3 defaults. Bit assignments:
    //   0x01 dataset, 0x02 table, 0x04 force, 0x08 partition_spec,
    //   0x10 separate_tables_per_asset_type.
    private void buildPartial0(com.google.cloud.asset.v1.BigQueryDestination result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.dataset_ = dataset_;
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.table_ = table_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.force_ = force_;
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        // Prefer the live sub-builder's state when one exists.
        result.partitionSpec_ =
            partitionSpecBuilder_ == null ? partitionSpec_ : partitionSpecBuilder_.build();
      }
      if (((from_bitField0_ & 0x00000010) != 0)) {
        result.separateTablesPerAssetType_ = separateTablesPerAssetType_;
      }
    }
646 
    // Deep-copies the builder (superclass handles field state).
    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }
651 
    // The following overrides exist only to narrow the return type to Builder
    // so reflective field mutations can be chained fluently; each delegates to
    // the superclass implementation unchanged.
    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }
679 
680     @java.lang.Override
mergeFrom(com.google.protobuf.Message other)681     public Builder mergeFrom(com.google.protobuf.Message other) {
682       if (other instanceof com.google.cloud.asset.v1.BigQueryDestination) {
683         return mergeFrom((com.google.cloud.asset.v1.BigQueryDestination) other);
684       } else {
685         super.mergeFrom(other);
686         return this;
687       }
688     }
689 
    // Type-specific merge with proto3 semantics: scalar/string fields are
    // copied only when set to a non-default value in `other` (so defaults
    // never overwrite existing data); the partition_spec message field is
    // merged recursively; unknown fields are concatenated.
    public Builder mergeFrom(com.google.cloud.asset.v1.BigQueryDestination other) {
      if (other == com.google.cloud.asset.v1.BigQueryDestination.getDefaultInstance()) return this;
      if (!other.getDataset().isEmpty()) {
        dataset_ = other.dataset_;
        bitField0_ |= 0x00000001;
        onChanged();
      }
      if (!other.getTable().isEmpty()) {
        table_ = other.table_;
        bitField0_ |= 0x00000002;
        onChanged();
      }
      if (other.getForce() != false) {
        setForce(other.getForce());
      }
      if (other.hasPartitionSpec()) {
        mergePartitionSpec(other.getPartitionSpec());
      }
      if (other.getSeparateTablesPerAssetType() != false) {
        setSeparateTablesPerAssetType(other.getSeparateTablesPerAssetType());
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }
715 
    // The message has no required fields, so any builder state is buildable.
    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }
720 
    /**
     * Reads serialized fields from {@code input}, merging them into this builder.
     * Each case label is a wire-format tag (field number << 3 | wire type);
     * tag 0 marks end of input, and unrecognized tags are preserved via
     * {@code parseUnknownField}.
     *
     * @throws java.io.IOException if the stream is malformed or reading fails
     */
    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10:
              {
                dataset_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
            case 18:
              {
                table_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
            case 24:
              {
                force_ = input.readBool();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
            case 34:
              {
                input.readMessage(getPartitionSpecFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
            case 40:
              {
                separateTablesPerAssetType_ = input.readBool();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        // Notify parents even on failure: fields read before the error remain set.
        onChanged();
      } // finally
      return this;
    }
783 
    // Has-bits: bit N tracks whether field N+1 has been explicitly set.
    private int bitField0_;

    // Holds either a String or a ByteString; decoded lazily by the accessors.
    private java.lang.Object dataset_ = "";
    /**
     * Required. The BigQuery dataset in format
     * "projects/projectId/datasets/datasetId", to which the snapshot result
     * should be exported. If this dataset does not exist, the export call returns
     * an INVALID_ARGUMENT error. Setting the `contentType` for `exportAssets`
     * determines the
     * [schema](/asset-inventory/docs/exporting-to-bigquery#bigquery-schema)
     * of the BigQuery table. Setting `separateTablesPerAssetType` to `TRUE` also
     * influences the schema.
     *
     * <code>string dataset = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The dataset.
     */
    public java.lang.String getDataset() {
      java.lang.Object ref = dataset_;
      if (!(ref instanceof java.lang.String)) {
        // Still in serialized ByteString form: decode once and cache the String.
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        dataset_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
816     /**
817      *
818      *
819      * <pre>
820      * Required. The BigQuery dataset in format
821      * "projects/projectId/datasets/datasetId", to which the snapshot result
822      * should be exported. If this dataset does not exist, the export call returns
823      * an INVALID_ARGUMENT error. Setting the `contentType` for `exportAssets`
824      * determines the
825      * [schema](/asset-inventory/docs/exporting-to-bigquery#bigquery-schema)
826      * of the BigQuery table. Setting `separateTablesPerAssetType` to `TRUE` also
827      * influences the schema.
828      * </pre>
829      *
830      * <code>string dataset = 1 [(.google.api.field_behavior) = REQUIRED];</code>
831      *
832      * @return The bytes for dataset.
833      */
getDatasetBytes()834     public com.google.protobuf.ByteString getDatasetBytes() {
835       java.lang.Object ref = dataset_;
836       if (ref instanceof String) {
837         com.google.protobuf.ByteString b =
838             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
839         dataset_ = b;
840         return b;
841       } else {
842         return (com.google.protobuf.ByteString) ref;
843       }
844     }
845     /**
846      *
847      *
848      * <pre>
849      * Required. The BigQuery dataset in format
850      * "projects/projectId/datasets/datasetId", to which the snapshot result
851      * should be exported. If this dataset does not exist, the export call returns
852      * an INVALID_ARGUMENT error. Setting the `contentType` for `exportAssets`
853      * determines the
854      * [schema](/asset-inventory/docs/exporting-to-bigquery#bigquery-schema)
855      * of the BigQuery table. Setting `separateTablesPerAssetType` to `TRUE` also
856      * influences the schema.
857      * </pre>
858      *
859      * <code>string dataset = 1 [(.google.api.field_behavior) = REQUIRED];</code>
860      *
861      * @param value The dataset to set.
862      * @return This builder for chaining.
863      */
setDataset(java.lang.String value)864     public Builder setDataset(java.lang.String value) {
865       if (value == null) {
866         throw new NullPointerException();
867       }
868       dataset_ = value;
869       bitField0_ |= 0x00000001;
870       onChanged();
871       return this;
872     }
873     /**
874      *
875      *
876      * <pre>
877      * Required. The BigQuery dataset in format
878      * "projects/projectId/datasets/datasetId", to which the snapshot result
879      * should be exported. If this dataset does not exist, the export call returns
880      * an INVALID_ARGUMENT error. Setting the `contentType` for `exportAssets`
881      * determines the
882      * [schema](/asset-inventory/docs/exporting-to-bigquery#bigquery-schema)
883      * of the BigQuery table. Setting `separateTablesPerAssetType` to `TRUE` also
884      * influences the schema.
885      * </pre>
886      *
887      * <code>string dataset = 1 [(.google.api.field_behavior) = REQUIRED];</code>
888      *
889      * @return This builder for chaining.
890      */
clearDataset()891     public Builder clearDataset() {
892       dataset_ = getDefaultInstance().getDataset();
893       bitField0_ = (bitField0_ & ~0x00000001);
894       onChanged();
895       return this;
896     }
897     /**
898      *
899      *
900      * <pre>
901      * Required. The BigQuery dataset in format
902      * "projects/projectId/datasets/datasetId", to which the snapshot result
903      * should be exported. If this dataset does not exist, the export call returns
904      * an INVALID_ARGUMENT error. Setting the `contentType` for `exportAssets`
905      * determines the
906      * [schema](/asset-inventory/docs/exporting-to-bigquery#bigquery-schema)
907      * of the BigQuery table. Setting `separateTablesPerAssetType` to `TRUE` also
908      * influences the schema.
909      * </pre>
910      *
911      * <code>string dataset = 1 [(.google.api.field_behavior) = REQUIRED];</code>
912      *
913      * @param value The bytes for dataset to set.
914      * @return This builder for chaining.
915      */
setDatasetBytes(com.google.protobuf.ByteString value)916     public Builder setDatasetBytes(com.google.protobuf.ByteString value) {
917       if (value == null) {
918         throw new NullPointerException();
919       }
920       checkByteStringIsUtf8(value);
921       dataset_ = value;
922       bitField0_ |= 0x00000001;
923       onChanged();
924       return this;
925     }
926 
    // Holds either a String or a ByteString; decoded lazily by the accessors.
    private java.lang.Object table_ = "";
    /**
     * Required. The BigQuery table to which the snapshot result should be
     * written. If this table does not exist, a new table with the given name
     * will be created.
     *
     * <code>string table = 2 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The table.
     */
    public java.lang.String getTable() {
      java.lang.Object ref = table_;
      if (!(ref instanceof java.lang.String)) {
        // Still in serialized ByteString form: decode once and cache the String.
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        table_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
952     /**
953      *
954      *
955      * <pre>
956      * Required. The BigQuery table to which the snapshot result should be
957      * written. If this table does not exist, a new table with the given name
958      * will be created.
959      * </pre>
960      *
961      * <code>string table = 2 [(.google.api.field_behavior) = REQUIRED];</code>
962      *
963      * @return The bytes for table.
964      */
getTableBytes()965     public com.google.protobuf.ByteString getTableBytes() {
966       java.lang.Object ref = table_;
967       if (ref instanceof String) {
968         com.google.protobuf.ByteString b =
969             com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
970         table_ = b;
971         return b;
972       } else {
973         return (com.google.protobuf.ByteString) ref;
974       }
975     }
976     /**
977      *
978      *
979      * <pre>
980      * Required. The BigQuery table to which the snapshot result should be
981      * written. If this table does not exist, a new table with the given name
982      * will be created.
983      * </pre>
984      *
985      * <code>string table = 2 [(.google.api.field_behavior) = REQUIRED];</code>
986      *
987      * @param value The table to set.
988      * @return This builder for chaining.
989      */
setTable(java.lang.String value)990     public Builder setTable(java.lang.String value) {
991       if (value == null) {
992         throw new NullPointerException();
993       }
994       table_ = value;
995       bitField0_ |= 0x00000002;
996       onChanged();
997       return this;
998     }
999     /**
1000      *
1001      *
1002      * <pre>
1003      * Required. The BigQuery table to which the snapshot result should be
1004      * written. If this table does not exist, a new table with the given name
1005      * will be created.
1006      * </pre>
1007      *
1008      * <code>string table = 2 [(.google.api.field_behavior) = REQUIRED];</code>
1009      *
1010      * @return This builder for chaining.
1011      */
clearTable()1012     public Builder clearTable() {
1013       table_ = getDefaultInstance().getTable();
1014       bitField0_ = (bitField0_ & ~0x00000002);
1015       onChanged();
1016       return this;
1017     }
1018     /**
1019      *
1020      *
1021      * <pre>
1022      * Required. The BigQuery table to which the snapshot result should be
1023      * written. If this table does not exist, a new table with the given name
1024      * will be created.
1025      * </pre>
1026      *
1027      * <code>string table = 2 [(.google.api.field_behavior) = REQUIRED];</code>
1028      *
1029      * @param value The bytes for table to set.
1030      * @return This builder for chaining.
1031      */
setTableBytes(com.google.protobuf.ByteString value)1032     public Builder setTableBytes(com.google.protobuf.ByteString value) {
1033       if (value == null) {
1034         throw new NullPointerException();
1035       }
1036       checkByteStringIsUtf8(value);
1037       table_ = value;
1038       bitField0_ |= 0x00000002;
1039       onChanged();
1040       return this;
1041     }
1042 
    private boolean force_;
    /**
     * If the destination table already exists and this flag is `TRUE`, the
     * table will be overwritten by the contents of assets snapshot. If the flag
     * is `FALSE` or unset and the destination table already exists, the export
     * call returns an INVALID_ARGUMENT error.
     *
     * <code>bool force = 3;</code>
     *
     * @return The force.
     */
    @java.lang.Override
    public boolean getForce() {
      return force_;
    }
1062     /**
1063      *
1064      *
1065      * <pre>
1066      * If the destination table already exists and this flag is `TRUE`, the
1067      * table will be overwritten by the contents of assets snapshot. If the flag
1068      * is `FALSE` or unset and the destination table already exists, the export
1069      * call returns an INVALID_ARGUMEMT error.
1070      * </pre>
1071      *
1072      * <code>bool force = 3;</code>
1073      *
1074      * @param value The force to set.
1075      * @return This builder for chaining.
1076      */
setForce(boolean value)1077     public Builder setForce(boolean value) {
1078 
1079       force_ = value;
1080       bitField0_ |= 0x00000004;
1081       onChanged();
1082       return this;
1083     }
1084     /**
1085      *
1086      *
1087      * <pre>
1088      * If the destination table already exists and this flag is `TRUE`, the
1089      * table will be overwritten by the contents of assets snapshot. If the flag
1090      * is `FALSE` or unset and the destination table already exists, the export
1091      * call returns an INVALID_ARGUMEMT error.
1092      * </pre>
1093      *
1094      * <code>bool force = 3;</code>
1095      *
1096      * @return This builder for chaining.
1097      */
clearForce()1098     public Builder clearForce() {
1099       bitField0_ = (bitField0_ & ~0x00000004);
1100       force_ = false;
1101       onChanged();
1102       return this;
1103     }
1104 
    // Message field 4; exactly one of partitionSpec_ / partitionSpecBuilder_ is
    // authoritative at any time (builder wins once created).
    private com.google.cloud.asset.v1.PartitionSpec partitionSpec_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.asset.v1.PartitionSpec,
            com.google.cloud.asset.v1.PartitionSpec.Builder,
            com.google.cloud.asset.v1.PartitionSpecOrBuilder>
        partitionSpecBuilder_;
    /**
     * [partition_spec] determines whether to export to partitioned table(s) and
     * how to partition the data. If unset (or its partition_key is unset or
     * `PARTITION_KEY_UNSPECIFIED`), results go to non-partitioned table(s) and
     * [force] decides whether existing tables are overwritten. When set, results
     * are written to partitioned table(s) with two extra timestamp columns
     * (readTime and requestTime, one of which is the partition key); an existing
     * table's schema is updated by appending columns as needed, then [force]
     * decides whether the corresponding partition is overwritten (`TRUE`) or
     * appended to (unset/`FALSE`). An error is returned if the schema update or
     * data append fails.
     *
     * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
     *
     * @return Whether the partitionSpec field is set.
     */
    public boolean hasPartitionSpec() {
      return ((bitField0_ & 0x00000008) != 0);
    }
1139     /**
1140      *
1141      *
1142      * <pre>
1143      * [partition_spec] determines whether to export to partitioned table(s) and
1144      * how to partition the data.
1145      * If [partition_spec] is unset or [partition_spec.partition_key] is unset or
1146      * `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
1147      * non-partitioned table(s). [force] will decide whether to overwrite existing
1148      * table(s).
1149      * If [partition_spec] is specified. First, the snapshot results will be
1150      * written to partitioned table(s) with two additional timestamp columns,
1151      * readTime and requestTime, one of which will be the partition key. Secondly,
1152      * in the case when any destination table already exists, it will first try to
1153      * update existing table's schema as necessary by appending additional
1154      * columns. Then, if [force] is `TRUE`, the corresponding partition will be
1155      * overwritten by the snapshot results (data in different partitions will
1156      * remain intact); if [force] is unset or `FALSE`, it will append the data. An
1157      * error will be returned if the schema update or data appension fails.
1158      * </pre>
1159      *
1160      * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
1161      *
1162      * @return The partitionSpec.
1163      */
getPartitionSpec()1164     public com.google.cloud.asset.v1.PartitionSpec getPartitionSpec() {
1165       if (partitionSpecBuilder_ == null) {
1166         return partitionSpec_ == null
1167             ? com.google.cloud.asset.v1.PartitionSpec.getDefaultInstance()
1168             : partitionSpec_;
1169       } else {
1170         return partitionSpecBuilder_.getMessage();
1171       }
1172     }
1173     /**
1174      *
1175      *
1176      * <pre>
1177      * [partition_spec] determines whether to export to partitioned table(s) and
1178      * how to partition the data.
1179      * If [partition_spec] is unset or [partition_spec.partition_key] is unset or
1180      * `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
1181      * non-partitioned table(s). [force] will decide whether to overwrite existing
1182      * table(s).
1183      * If [partition_spec] is specified. First, the snapshot results will be
1184      * written to partitioned table(s) with two additional timestamp columns,
1185      * readTime and requestTime, one of which will be the partition key. Secondly,
1186      * in the case when any destination table already exists, it will first try to
1187      * update existing table's schema as necessary by appending additional
1188      * columns. Then, if [force] is `TRUE`, the corresponding partition will be
1189      * overwritten by the snapshot results (data in different partitions will
1190      * remain intact); if [force] is unset or `FALSE`, it will append the data. An
1191      * error will be returned if the schema update or data appension fails.
1192      * </pre>
1193      *
1194      * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
1195      */
setPartitionSpec(com.google.cloud.asset.v1.PartitionSpec value)1196     public Builder setPartitionSpec(com.google.cloud.asset.v1.PartitionSpec value) {
1197       if (partitionSpecBuilder_ == null) {
1198         if (value == null) {
1199           throw new NullPointerException();
1200         }
1201         partitionSpec_ = value;
1202       } else {
1203         partitionSpecBuilder_.setMessage(value);
1204       }
1205       bitField0_ |= 0x00000008;
1206       onChanged();
1207       return this;
1208     }
1209     /**
1210      *
1211      *
1212      * <pre>
1213      * [partition_spec] determines whether to export to partitioned table(s) and
1214      * how to partition the data.
1215      * If [partition_spec] is unset or [partition_spec.partition_key] is unset or
1216      * `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
1217      * non-partitioned table(s). [force] will decide whether to overwrite existing
1218      * table(s).
1219      * If [partition_spec] is specified. First, the snapshot results will be
1220      * written to partitioned table(s) with two additional timestamp columns,
1221      * readTime and requestTime, one of which will be the partition key. Secondly,
1222      * in the case when any destination table already exists, it will first try to
1223      * update existing table's schema as necessary by appending additional
1224      * columns. Then, if [force] is `TRUE`, the corresponding partition will be
1225      * overwritten by the snapshot results (data in different partitions will
1226      * remain intact); if [force] is unset or `FALSE`, it will append the data. An
1227      * error will be returned if the schema update or data appension fails.
1228      * </pre>
1229      *
1230      * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
1231      */
setPartitionSpec( com.google.cloud.asset.v1.PartitionSpec.Builder builderForValue)1232     public Builder setPartitionSpec(
1233         com.google.cloud.asset.v1.PartitionSpec.Builder builderForValue) {
1234       if (partitionSpecBuilder_ == null) {
1235         partitionSpec_ = builderForValue.build();
1236       } else {
1237         partitionSpecBuilder_.setMessage(builderForValue.build());
1238       }
1239       bitField0_ |= 0x00000008;
1240       onChanged();
1241       return this;
1242     }
1243     /**
1244      *
1245      *
1246      * <pre>
1247      * [partition_spec] determines whether to export to partitioned table(s) and
1248      * how to partition the data.
1249      * If [partition_spec] is unset or [partition_spec.partition_key] is unset or
1250      * `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
1251      * non-partitioned table(s). [force] will decide whether to overwrite existing
1252      * table(s).
1253      * If [partition_spec] is specified. First, the snapshot results will be
1254      * written to partitioned table(s) with two additional timestamp columns,
1255      * readTime and requestTime, one of which will be the partition key. Secondly,
1256      * in the case when any destination table already exists, it will first try to
1257      * update existing table's schema as necessary by appending additional
1258      * columns. Then, if [force] is `TRUE`, the corresponding partition will be
1259      * overwritten by the snapshot results (data in different partitions will
1260      * remain intact); if [force] is unset or `FALSE`, it will append the data. An
1261      * error will be returned if the schema update or data appension fails.
1262      * </pre>
1263      *
1264      * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
1265      */
mergePartitionSpec(com.google.cloud.asset.v1.PartitionSpec value)1266     public Builder mergePartitionSpec(com.google.cloud.asset.v1.PartitionSpec value) {
1267       if (partitionSpecBuilder_ == null) {
1268         if (((bitField0_ & 0x00000008) != 0)
1269             && partitionSpec_ != null
1270             && partitionSpec_ != com.google.cloud.asset.v1.PartitionSpec.getDefaultInstance()) {
1271           getPartitionSpecBuilder().mergeFrom(value);
1272         } else {
1273           partitionSpec_ = value;
1274         }
1275       } else {
1276         partitionSpecBuilder_.mergeFrom(value);
1277       }
1278       bitField0_ |= 0x00000008;
1279       onChanged();
1280       return this;
1281     }
1282     /**
1283      *
1284      *
1285      * <pre>
1286      * [partition_spec] determines whether to export to partitioned table(s) and
1287      * how to partition the data.
1288      * If [partition_spec] is unset or [partition_spec.partition_key] is unset or
1289      * `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
1290      * non-partitioned table(s). [force] will decide whether to overwrite existing
1291      * table(s).
1292      * If [partition_spec] is specified. First, the snapshot results will be
1293      * written to partitioned table(s) with two additional timestamp columns,
1294      * readTime and requestTime, one of which will be the partition key. Secondly,
1295      * in the case when any destination table already exists, it will first try to
1296      * update existing table's schema as necessary by appending additional
1297      * columns. Then, if [force] is `TRUE`, the corresponding partition will be
1298      * overwritten by the snapshot results (data in different partitions will
1299      * remain intact); if [force] is unset or `FALSE`, it will append the data. An
1300      * error will be returned if the schema update or data appension fails.
1301      * </pre>
1302      *
1303      * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
1304      */
clearPartitionSpec()1305     public Builder clearPartitionSpec() {
1306       bitField0_ = (bitField0_ & ~0x00000008);
1307       partitionSpec_ = null;
1308       if (partitionSpecBuilder_ != null) {
1309         partitionSpecBuilder_.dispose();
1310         partitionSpecBuilder_ = null;
1311       }
1312       onChanged();
1313       return this;
1314     }
    /**
     * Returns a mutable builder for the {@code partition_spec} field, creating
     * the nested field builder on first use and marking the field as set.
     *
     * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
     */
    public com.google.cloud.asset.v1.PartitionSpec.Builder getPartitionSpecBuilder() {
      // Obtaining the builder counts as setting the field.
      bitField0_ |= 0x00000008;
      onChanged();
      return getPartitionSpecFieldBuilder().getBuilder();
    }
1343     /**
1344      *
1345      *
1346      * <pre>
1347      * [partition_spec] determines whether to export to partitioned table(s) and
1348      * how to partition the data.
1349      * If [partition_spec] is unset or [partition_spec.partition_key] is unset or
1350      * `PARTITION_KEY_UNSPECIFIED`, the snapshot results will be exported to
1351      * non-partitioned table(s). [force] will decide whether to overwrite existing
1352      * table(s).
1353      * If [partition_spec] is specified. First, the snapshot results will be
1354      * written to partitioned table(s) with two additional timestamp columns,
1355      * readTime and requestTime, one of which will be the partition key. Secondly,
1356      * in the case when any destination table already exists, it will first try to
1357      * update existing table's schema as necessary by appending additional
1358      * columns. Then, if [force] is `TRUE`, the corresponding partition will be
1359      * overwritten by the snapshot results (data in different partitions will
1360      * remain intact); if [force] is unset or `FALSE`, it will append the data. An
1361      * error will be returned if the schema update or data appension fails.
1362      * </pre>
1363      *
1364      * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
1365      */
getPartitionSpecOrBuilder()1366     public com.google.cloud.asset.v1.PartitionSpecOrBuilder getPartitionSpecOrBuilder() {
1367       if (partitionSpecBuilder_ != null) {
1368         return partitionSpecBuilder_.getMessageOrBuilder();
1369       } else {
1370         return partitionSpec_ == null
1371             ? com.google.cloud.asset.v1.PartitionSpec.getDefaultInstance()
1372             : partitionSpec_;
1373       }
1374     }
    /**
     * Lazily creates and returns the single-field builder for
     * {@code partition_spec}. After creation the builder owns the field value,
     * so the plain message reference is cleared to avoid two sources of truth.
     *
     * <code>.google.cloud.asset.v1.PartitionSpec partition_spec = 4;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.asset.v1.PartitionSpec,
            com.google.cloud.asset.v1.PartitionSpec.Builder,
            com.google.cloud.asset.v1.PartitionSpecOrBuilder>
        getPartitionSpecFieldBuilder() {
      if (partitionSpecBuilder_ == null) {
        partitionSpecBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.asset.v1.PartitionSpec,
                com.google.cloud.asset.v1.PartitionSpec.Builder,
                com.google.cloud.asset.v1.PartitionSpecOrBuilder>(
                getPartitionSpec(), getParentForChildren(), isClean());
        // Builder now holds the value; drop the direct reference.
        partitionSpec_ = null;
      }
      return partitionSpecBuilder_;
    }
1414 
    private boolean separateTablesPerAssetType_;
    /**
     * If this flag is `TRUE`, the snapshot results will be written to one or
     * multiple tables, each of which contains results of one asset type, with
     * [force] and [partition_spec] applying to each. Per-asset-type table names
     * are [table] + "_" + the asset type name with non-alphanumeric characters
     * replaced by "_" (e.g. table "mytable" and assets of type
     * "storage.googleapis.com/Bucket" yield
     * "mytable_storage_googleapis_com_Bucket"); missing tables are created.
     * When [content_type] in the ExportAssetsRequest is `RESOURCE`, each table's
     * schema includes RECORD-type columns mapped to the nested fields of
     * Asset.resource.data for that asset type, up to BigQuery's 15-level
     * nesting limit
     * (https://cloud.google.com/bigquery/docs/nested-repeated#limitations);
     * deeper fields are stored as JSON strings in a child column of their
     * parent RECORD column. If exporting to any one table fails, the whole
     * export call returns an error, but results already exported to other
     * tables persist — a table never holds partial results.
     *
     * <code>bool separate_tables_per_asset_type = 5;</code>
     *
     * @return The separateTablesPerAssetType.
     */
    @java.lang.Override
    public boolean getSeparateTablesPerAssetType() {
      return separateTablesPerAssetType_;
    }
1454     /**
1455      *
1456      *
1457      * <pre>
1458      * If this flag is `TRUE`, the snapshot results will be written to one or
1459      * multiple tables, each of which contains results of one asset type. The
1460      * [force] and [partition_spec] fields will apply to each of them.
1461      * Field [table] will be concatenated with "_" and the asset type names (see
1462      * https://cloud.google.com/asset-inventory/docs/supported-asset-types for
1463      * supported asset types) to construct per-asset-type table names, in which
1464      * all non-alphanumeric characters like "." and "/" will be substituted by
1465      * "_". Example: if field [table] is "mytable" and snapshot results
1466      * contain "storage.googleapis.com/Bucket" assets, the corresponding table
1467      * name will be "mytable_storage_googleapis_com_Bucket". If any of these
1468      * tables does not exist, a new table with the concatenated name will be
1469      * created.
1470      * When [content_type] in the ExportAssetsRequest is `RESOURCE`, the schema of
1471      * each table will include RECORD-type columns mapped to the nested fields in
1472      * the Asset.resource.data field of that asset type (up to the 15 nested level
1473      * BigQuery supports
1474      * (https://cloud.google.com/bigquery/docs/nested-repeated#limitations)). The
1475      * fields in &gt;15 nested levels will be stored in JSON format string as a child
1476      * column of its parent RECORD column.
1477      * If error occurs when exporting to any table, the whole export call will
1478      * return an error but the export results that already succeed will persist.
1479      * Example: if exporting to table_type_A succeeds when exporting to
1480      * table_type_B fails during one export call, the results in table_type_A will
1481      * persist and there will not be partial results persisting in a table.
1482      * </pre>
1483      *
1484      * <code>bool separate_tables_per_asset_type = 5;</code>
1485      *
1486      * @param value The separateTablesPerAssetType to set.
1487      * @return This builder for chaining.
1488      */
setSeparateTablesPerAssetType(boolean value)1489     public Builder setSeparateTablesPerAssetType(boolean value) {
1490 
1491       separateTablesPerAssetType_ = value;
1492       bitField0_ |= 0x00000010;
1493       onChanged();
1494       return this;
1495     }
1496     /**
1497      *
1498      *
1499      * <pre>
1500      * If this flag is `TRUE`, the snapshot results will be written to one or
1501      * multiple tables, each of which contains results of one asset type. The
1502      * [force] and [partition_spec] fields will apply to each of them.
1503      * Field [table] will be concatenated with "_" and the asset type names (see
1504      * https://cloud.google.com/asset-inventory/docs/supported-asset-types for
1505      * supported asset types) to construct per-asset-type table names, in which
1506      * all non-alphanumeric characters like "." and "/" will be substituted by
1507      * "_". Example: if field [table] is "mytable" and snapshot results
1508      * contain "storage.googleapis.com/Bucket" assets, the corresponding table
1509      * name will be "mytable_storage_googleapis_com_Bucket". If any of these
1510      * tables does not exist, a new table with the concatenated name will be
1511      * created.
1512      * When [content_type] in the ExportAssetsRequest is `RESOURCE`, the schema of
1513      * each table will include RECORD-type columns mapped to the nested fields in
1514      * the Asset.resource.data field of that asset type (up to the 15 nested level
1515      * BigQuery supports
1516      * (https://cloud.google.com/bigquery/docs/nested-repeated#limitations)). The
1517      * fields in &gt;15 nested levels will be stored in JSON format string as a child
1518      * column of its parent RECORD column.
1519      * If error occurs when exporting to any table, the whole export call will
1520      * return an error but the export results that already succeed will persist.
1521      * Example: if exporting to table_type_A succeeds when exporting to
1522      * table_type_B fails during one export call, the results in table_type_A will
1523      * persist and there will not be partial results persisting in a table.
1524      * </pre>
1525      *
1526      * <code>bool separate_tables_per_asset_type = 5;</code>
1527      *
1528      * @return This builder for chaining.
1529      */
clearSeparateTablesPerAssetType()1530     public Builder clearSeparateTablesPerAssetType() {
1531       bitField0_ = (bitField0_ & ~0x00000010);
1532       separateTablesPerAssetType_ = false;
1533       onChanged();
1534       return this;
1535     }
1536 
    // Replaces this builder's unknown-field set; delegates to the generated superclass.
    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }
1541 
    // Merges the given unknown fields into this builder's existing set;
    // delegates to the generated superclass.
    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }
1547 
1548     // @@protoc_insertion_point(builder_scope:google.cloud.asset.v1.BigQueryDestination)
1549   }
1550 
1551   // @@protoc_insertion_point(class_scope:google.cloud.asset.v1.BigQueryDestination)
1552   private static final com.google.cloud.asset.v1.BigQueryDestination DEFAULT_INSTANCE;
1553 
1554   static {
1555     DEFAULT_INSTANCE = new com.google.cloud.asset.v1.BigQueryDestination();
1556   }
1557 
  // Returns the singleton default (all-fields-default) instance of this message.
  public static com.google.cloud.asset.v1.BigQueryDestination getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }
1561 
  /**
   * Stateless shared parser. Parsing merges the input into a fresh Builder and
   * returns {@code buildPartial()}; on failure the partially-built message is
   * attached to the thrown {@code InvalidProtocolBufferException}.
   */
  private static final com.google.protobuf.Parser<BigQueryDestination> PARSER =
      new com.google.protobuf.AbstractParser<BigQueryDestination>() {
        @java.lang.Override
        public BigQueryDestination parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            // Re-throw with whatever was parsed so far attached for callers.
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            // Convert missing-required-field failures to the protobuf exception type.
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            // Wrap plain I/O failures in the protobuf-specific exception type.
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };
1583 
  // Static accessor for the shared parser instance.
  public static com.google.protobuf.Parser<BigQueryDestination> parser() {
    return PARSER;
  }
1587 
  // Instance-level accessor for the shared parser (protobuf Message contract).
  @java.lang.Override
  public com.google.protobuf.Parser<BigQueryDestination> getParserForType() {
    return PARSER;
  }
1592 
  // Instance-level accessor for the default instance (protobuf Message contract).
  @java.lang.Override
  public com.google.cloud.asset.v1.BigQueryDestination getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
1597 }
1598