/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.cloud.automl.v1;

import static io.grpc.MethodDescriptor.generateFullMethodName;

/**
 *
 *
 * <pre>
 * AutoML Prediction API.
 * On any input that is documented to expect a string parameter in
 * snake_case or dash-case, either of those cases is accepted.
 * </pre>
 */
@javax.annotation.Generated(
    value = "by gRPC proto compiler",
    comments = "Source: google/cloud/automl/v1/prediction_service.proto")
@io.grpc.stub.annotations.GrpcGenerated
public final class PredictionServiceGrpc {

  private PredictionServiceGrpc() {}

  public static final String SERVICE_NAME = "google.cloud.automl.v1.PredictionService";

  // Static method descriptors that strictly reflect the proto.
  private static volatile io.grpc.MethodDescriptor<
          com.google.cloud.automl.v1.PredictRequest, com.google.cloud.automl.v1.PredictResponse>
      getPredictMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "Predict",
      requestType = com.google.cloud.automl.v1.PredictRequest.class,
      responseType = com.google.cloud.automl.v1.PredictResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<
          com.google.cloud.automl.v1.PredictRequest, com.google.cloud.automl.v1.PredictResponse>
      getPredictMethod() {
    io.grpc.MethodDescriptor<
            com.google.cloud.automl.v1.PredictRequest, com.google.cloud.automl.v1.PredictResponse>
        getPredictMethod;
    if ((getPredictMethod = PredictionServiceGrpc.getPredictMethod) == null) {
      synchronized (PredictionServiceGrpc.class) {
        if ((getPredictMethod = PredictionServiceGrpc.getPredictMethod) == null) {
          PredictionServiceGrpc.getPredictMethod =
              getPredictMethod =
                  io.grpc.MethodDescriptor
                      .<com.google.cloud.automl.v1.PredictRequest,
                          com.google.cloud.automl.v1.PredictResponse>
                          newBuilder()
                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
                      .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Predict"))
                      .setSampledToLocalTracing(true)
                      .setRequestMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.cloud.automl.v1.PredictRequest.getDefaultInstance()))
                      .setResponseMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.cloud.automl.v1.PredictResponse.getDefaultInstance()))
                      .setSchemaDescriptor(new PredictionServiceMethodDescriptorSupplier("Predict"))
                      .build();
        }
      }
    }
    return getPredictMethod;
  }
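  // The method descriptors above and below are initialized lazily with double-checked locking
  // on the class, so the proto marshallers are only built on first use. Clients that build
  // calls by hand rather than through the generated stubs can fetch a descriptor directly,
  // e.g. (sketch only, "channel" and "callOptions" assumed to exist):
  //
  //   channel.newCall(PredictionServiceGrpc.getPredictMethod(), callOptions);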
  private static volatile io.grpc.MethodDescriptor<
          com.google.cloud.automl.v1.BatchPredictRequest, com.google.longrunning.Operation>
      getBatchPredictMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "BatchPredict",
      requestType = com.google.cloud.automl.v1.BatchPredictRequest.class,
      responseType = com.google.longrunning.Operation.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<
          com.google.cloud.automl.v1.BatchPredictRequest, com.google.longrunning.Operation>
      getBatchPredictMethod() {
    io.grpc.MethodDescriptor<
            com.google.cloud.automl.v1.BatchPredictRequest, com.google.longrunning.Operation>
        getBatchPredictMethod;
    if ((getBatchPredictMethod = PredictionServiceGrpc.getBatchPredictMethod) == null) {
      synchronized (PredictionServiceGrpc.class) {
        if ((getBatchPredictMethod = PredictionServiceGrpc.getBatchPredictMethod) == null) {
          PredictionServiceGrpc.getBatchPredictMethod =
              getBatchPredictMethod =
                  io.grpc.MethodDescriptor
                      .<com.google.cloud.automl.v1.BatchPredictRequest,
                          com.google.longrunning.Operation>
                          newBuilder()
                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
                      .setFullMethodName(generateFullMethodName(SERVICE_NAME, "BatchPredict"))
                      .setSampledToLocalTracing(true)
                      .setRequestMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.cloud.automl.v1.BatchPredictRequest.getDefaultInstance()))
                      .setResponseMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.longrunning.Operation.getDefaultInstance()))
                      .setSchemaDescriptor(
                          new PredictionServiceMethodDescriptorSupplier("BatchPredict"))
                      .build();
        }
      }
    }
    return getBatchPredictMethod;
  }

  /** Creates a new async stub that supports all call types for the service */
  public static PredictionServiceStub newStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<PredictionServiceStub> factory =
        new io.grpc.stub.AbstractStub.StubFactory<PredictionServiceStub>() {
          @java.lang.Override
          public PredictionServiceStub newStub(
              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
            return new PredictionServiceStub(channel, callOptions);
          }
        };
    return PredictionServiceStub.newStub(factory, channel);
  }

  /**
   * Creates a new blocking-style stub that supports unary and streaming output calls on the service
   */
  public static PredictionServiceBlockingStub newBlockingStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<PredictionServiceBlockingStub> factory =
        new io.grpc.stub.AbstractStub.StubFactory<PredictionServiceBlockingStub>() {
          @java.lang.Override
          public PredictionServiceBlockingStub newStub(
              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
            return new PredictionServiceBlockingStub(channel, callOptions);
          }
        };
    return PredictionServiceBlockingStub.newStub(factory, channel);
  }

  /** Creates a new ListenableFuture-style stub that supports unary calls on the service */
  public static PredictionServiceFutureStub newFutureStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<PredictionServiceFutureStub> factory =
        new io.grpc.stub.AbstractStub.StubFactory<PredictionServiceFutureStub>() {
          @java.lang.Override
          public PredictionServiceFutureStub newStub(
              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
            return new PredictionServiceFutureStub(channel, callOptions);
          }
        };
    return PredictionServiceFutureStub.newStub(factory, channel);
  }
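  // Usage sketch (illustrative only, not part of the generated API): a client typically builds
  // a TLS channel to the AutoML endpoint and calls one of the stubs created by the factory
  // methods above. The resource name is a placeholder, and real calls also need call
  // credentials (for example from GoogleCredentials), which are omitted here for brevity.
  //
  //   io.grpc.ManagedChannel channel =
  //       io.grpc.ManagedChannelBuilder.forAddress("automl.googleapis.com", 443)
  //           .useTransportSecurity()
  //           .build();
  //   PredictionServiceGrpc.PredictionServiceBlockingStub blockingStub =
  //       PredictionServiceGrpc.newBlockingStub(channel);
  //   com.google.cloud.automl.v1.PredictRequest request =
  //       com.google.cloud.automl.v1.PredictRequest.newBuilder()
  //           .setName("projects/PROJECT/locations/us-central1/models/MODEL_ID") // placeholder
  //           .build();
  //   com.google.cloud.automl.v1.PredictResponse response = blockingStub.predict(request);
  //   channel.shutdown();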
  /**
   *
   *
   * <pre>
   * AutoML Prediction API.
   * On any input that is documented to expect a string parameter in
   * snake_case or dash-case, either of those cases is accepted.
   * </pre>
   */
  public interface AsyncService {

    /**
     *
     *
     * <pre>
     * Perform an online prediction. The prediction result is directly
     * returned in the response.
     * Available for the following ML scenarios and their expected request payloads:
     * AutoML Vision Classification
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Vision Object Detection
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Natural Language Classification
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Natural Language Entity Extraction
     * * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document
     * in .PDF, .TIF or .TIFF format with size up to 20MB.
     * AutoML Natural Language Sentiment Analysis
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Translation
     * * A TextSnippet up to 25,000 characters, UTF-8 encoded.
     * AutoML Tables
     * * A row with column values matching
     * the columns of the model, up to 5MB. Not available for FORECASTING
     * `prediction_type`.
     * </pre>
     */
    default void predict(
        com.google.cloud.automl.v1.PredictRequest request,
        io.grpc.stub.StreamObserver<com.google.cloud.automl.v1.PredictResponse> responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPredictMethod(), responseObserver);
    }

    /**
     *
     *
     * <pre>
     * Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], the batch
     * prediction result won't be immediately available in the response. Instead,
     * a long-running operation object is returned. The user can poll the operation
     * result via the [GetOperation][google.longrunning.Operations.GetOperation]
     * method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
     * the [response][google.longrunning.Operation.response] field.
     * Available for the following ML scenarios:
     * * AutoML Vision Classification
     * * AutoML Vision Object Detection
     * * AutoML Video Intelligence Classification
     * * AutoML Video Intelligence Object Tracking
     * * AutoML Natural Language Classification
     * * AutoML Natural Language Entity Extraction
     * * AutoML Natural Language Sentiment Analysis
     * * AutoML Tables
     * </pre>
     */
    default void batchPredict(
        com.google.cloud.automl.v1.BatchPredictRequest request,
        io.grpc.stub.StreamObserver<com.google.longrunning.Operation> responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
          getBatchPredictMethod(), responseObserver);
    }
  }
  /**
   * Base class for the server implementation of the service PredictionService.
   *
   * <pre>
   * AutoML Prediction API.
   * On any input that is documented to expect a string parameter in
   * snake_case or dash-case, either of those cases is accepted.
   * </pre>
   */
  public abstract static class PredictionServiceImplBase
      implements io.grpc.BindableService, AsyncService {

    @java.lang.Override
    public final io.grpc.ServerServiceDefinition bindService() {
      return PredictionServiceGrpc.bindService(this);
    }
  }

  /**
   * A stub to allow clients to do asynchronous rpc calls to service PredictionService.
   *
   * <pre>
   * AutoML Prediction API.
   * On any input that is documented to expect a string parameter in
   * snake_case or dash-case, either of those cases is accepted.
   * </pre>
   */
  public static final class PredictionServiceStub
      extends io.grpc.stub.AbstractAsyncStub<PredictionServiceStub> {
    private PredictionServiceStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected PredictionServiceStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new PredictionServiceStub(channel, callOptions);
    }

    /**
     *
     *
     * <pre>
     * Perform an online prediction. The prediction result is directly
     * returned in the response.
     * Available for the following ML scenarios and their expected request payloads:
     * AutoML Vision Classification
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Vision Object Detection
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Natural Language Classification
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Natural Language Entity Extraction
     * * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document
     * in .PDF, .TIF or .TIFF format with size up to 20MB.
     * AutoML Natural Language Sentiment Analysis
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Translation
     * * A TextSnippet up to 25,000 characters, UTF-8 encoded.
     * AutoML Tables
     * * A row with column values matching
     * the columns of the model, up to 5MB. Not available for FORECASTING
     * `prediction_type`.
     * </pre>
     */
    public void predict(
        com.google.cloud.automl.v1.PredictRequest request,
        io.grpc.stub.StreamObserver<com.google.cloud.automl.v1.PredictResponse> responseObserver) {
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getPredictMethod(), getCallOptions()), request, responseObserver);
    }
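    // Usage sketch (illustrative only): the async stub delivers the result through a
    // StreamObserver instead of blocking the calling thread. "asyncStub" and "request" are
    // assumed to have been built as in the earlier sketch, with the stub coming from
    // PredictionServiceGrpc.newStub(channel).
    //
    //   asyncStub.predict(
    //       request,
    //       new io.grpc.stub.StreamObserver<com.google.cloud.automl.v1.PredictResponse>() {
    //         @java.lang.Override
    //         public void onNext(com.google.cloud.automl.v1.PredictResponse response) {
    //           // handle the prediction payload
    //         }
    //
    //         @java.lang.Override
    //         public void onError(Throwable t) {
    //           // handle the RPC failure
    //         }
    //
    //         @java.lang.Override
    //         public void onCompleted() {
    //           // unary call: completes after the single response
    //         }
    //       });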
    /**
     *
     *
     * <pre>
     * Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], the batch
     * prediction result won't be immediately available in the response. Instead,
     * a long-running operation object is returned. The user can poll the operation
     * result via the [GetOperation][google.longrunning.Operations.GetOperation]
     * method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
     * the [response][google.longrunning.Operation.response] field.
     * Available for the following ML scenarios:
     * * AutoML Vision Classification
     * * AutoML Vision Object Detection
     * * AutoML Video Intelligence Classification
     * * AutoML Video Intelligence Object Tracking
     * * AutoML Natural Language Classification
     * * AutoML Natural Language Entity Extraction
     * * AutoML Natural Language Sentiment Analysis
     * * AutoML Tables
     * </pre>
     */
    public void batchPredict(
        com.google.cloud.automl.v1.BatchPredictRequest request,
        io.grpc.stub.StreamObserver<com.google.longrunning.Operation> responseObserver) {
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getBatchPredictMethod(), getCallOptions()),
          request,
          responseObserver);
    }
  }

  /**
   * A stub to allow clients to do synchronous rpc calls to service PredictionService.
   *
   * <pre>
   * AutoML Prediction API.
   * On any input that is documented to expect a string parameter in
   * snake_case or dash-case, either of those cases is accepted.
   * </pre>
   */
  public static final class PredictionServiceBlockingStub
      extends io.grpc.stub.AbstractBlockingStub<PredictionServiceBlockingStub> {
    private PredictionServiceBlockingStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected PredictionServiceBlockingStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new PredictionServiceBlockingStub(channel, callOptions);
    }

    /**
     *
     *
     * <pre>
     * Perform an online prediction. The prediction result is directly
     * returned in the response.
     * Available for the following ML scenarios and their expected request payloads:
     * AutoML Vision Classification
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Vision Object Detection
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Natural Language Classification
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Natural Language Entity Extraction
     * * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document
     * in .PDF, .TIF or .TIFF format with size up to 20MB.
     * AutoML Natural Language Sentiment Analysis
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Translation
     * * A TextSnippet up to 25,000 characters, UTF-8 encoded.
     * AutoML Tables
     * * A row with column values matching
     * the columns of the model, up to 5MB. Not available for FORECASTING
     * `prediction_type`.
     * </pre>
     */
    public com.google.cloud.automl.v1.PredictResponse predict(
        com.google.cloud.automl.v1.PredictRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getPredictMethod(), getCallOptions(), request);
    }

    /**
     *
     *
     * <pre>
     * Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], the batch
     * prediction result won't be immediately available in the response. Instead,
     * a long-running operation object is returned. The user can poll the operation
     * result via the [GetOperation][google.longrunning.Operations.GetOperation]
     * method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
     * the [response][google.longrunning.Operation.response] field.
     * Available for the following ML scenarios:
     * * AutoML Vision Classification
     * * AutoML Vision Object Detection
     * * AutoML Video Intelligence Classification
     * * AutoML Video Intelligence Object Tracking
     * * AutoML Natural Language Classification
     * * AutoML Natural Language Entity Extraction
     * * AutoML Natural Language Sentiment Analysis
     * * AutoML Tables
     * </pre>
     */
    public com.google.longrunning.Operation batchPredict(
        com.google.cloud.automl.v1.BatchPredictRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getBatchPredictMethod(), getCallOptions(), request);
    }
  }
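  // Usage sketch (illustrative only): batchPredict returns a google.longrunning.Operation
  // rather than the final result. "blockingStub" and "batchRequest" are assumed to exist;
  // polling the returned operation goes through the Operations API (for example an
  // OperationsGrpc stub on the same channel), which is outside this generated class.
  //
  //   com.google.longrunning.Operation operation = blockingStub.batchPredict(batchRequest);
  //   String operationName = operation.getName(); // poll via GetOperation until getDone() is true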
  /**
   * A stub to allow clients to do ListenableFuture-style rpc calls to service PredictionService.
   *
   * <pre>
   * AutoML Prediction API.
   * On any input that is documented to expect a string parameter in
   * snake_case or dash-case, either of those cases is accepted.
   * </pre>
   */
  public static final class PredictionServiceFutureStub
      extends io.grpc.stub.AbstractFutureStub<PredictionServiceFutureStub> {
    private PredictionServiceFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected PredictionServiceFutureStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new PredictionServiceFutureStub(channel, callOptions);
    }

    /**
     *
     *
     * <pre>
     * Perform an online prediction. The prediction result is directly
     * returned in the response.
     * Available for the following ML scenarios and their expected request payloads:
     * AutoML Vision Classification
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Vision Object Detection
     * * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
     * AutoML Natural Language Classification
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Natural Language Entity Extraction
     * * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document
     * in .PDF, .TIF or .TIFF format with size up to 20MB.
     * AutoML Natural Language Sentiment Analysis
     * * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
     * .PDF, .TIF or .TIFF format with size up to 2MB.
     * AutoML Translation
     * * A TextSnippet up to 25,000 characters, UTF-8 encoded.
     * AutoML Tables
     * * A row with column values matching
     * the columns of the model, up to 5MB. Not available for FORECASTING
     * `prediction_type`.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.automl.v1.PredictResponse>
        predict(com.google.cloud.automl.v1.PredictRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getPredictMethod(), getCallOptions()), request);
    }

    /**
     *
     *
     * <pre>
     * Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], the batch
     * prediction result won't be immediately available in the response. Instead,
     * a long-running operation object is returned. The user can poll the operation
     * result via the [GetOperation][google.longrunning.Operations.GetOperation]
     * method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
     * the [response][google.longrunning.Operation.response] field.
     * Available for the following ML scenarios:
     * * AutoML Vision Classification
     * * AutoML Vision Object Detection
     * * AutoML Video Intelligence Classification
     * * AutoML Video Intelligence Object Tracking
     * * AutoML Natural Language Classification
     * * AutoML Natural Language Entity Extraction
     * * AutoML Natural Language Sentiment Analysis
     * * AutoML Tables
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<com.google.longrunning.Operation>
        batchPredict(com.google.cloud.automl.v1.BatchPredictRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getBatchPredictMethod(), getCallOptions()), request);
    }
  }
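  // Usage sketch (illustrative only): the future stub returns a ListenableFuture, so results
  // can be consumed with a Guava callback on an executor of the caller's choosing. "futureStub",
  // "request" and "executor" are assumed to exist; they are not part of this class.
  //
  //   com.google.common.util.concurrent.ListenableFuture<com.google.cloud.automl.v1.PredictResponse>
  //       future = futureStub.predict(request);
  //   com.google.common.util.concurrent.Futures.addCallback(
  //       future,
  //       new com.google.common.util.concurrent.FutureCallback<
  //           com.google.cloud.automl.v1.PredictResponse>() {
  //         @java.lang.Override
  //         public void onSuccess(com.google.cloud.automl.v1.PredictResponse response) {
  //           // handle the prediction payload
  //         }
  //
  //         @java.lang.Override
  //         public void onFailure(Throwable t) {
  //           // handle the RPC failure
  //         }
  //       },
  //       executor);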
  private static final int METHODID_PREDICT = 0;
  private static final int METHODID_BATCH_PREDICT = 1;

  private static final class MethodHandlers<Req, Resp>
      implements io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
    private final AsyncService serviceImpl;
    private final int methodId;

    MethodHandlers(AsyncService serviceImpl, int methodId) {
      this.serviceImpl = serviceImpl;
      this.methodId = methodId;
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        case METHODID_PREDICT:
          serviceImpl.predict(
              (com.google.cloud.automl.v1.PredictRequest) request,
              (io.grpc.stub.StreamObserver<com.google.cloud.automl.v1.PredictResponse>)
                  responseObserver);
          break;
        case METHODID_BATCH_PREDICT:
          serviceImpl.batchPredict(
              (com.google.cloud.automl.v1.BatchPredictRequest) request,
              (io.grpc.stub.StreamObserver<com.google.longrunning.Operation>) responseObserver);
          break;
        default:
          throw new AssertionError();
      }
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public io.grpc.stub.StreamObserver<Req> invoke(
        io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        default:
          throw new AssertionError();
      }
    }
  }

  public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
    return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
        .addMethod(
            getPredictMethod(),
            io.grpc.stub.ServerCalls.asyncUnaryCall(
                new MethodHandlers<
                    com.google.cloud.automl.v1.PredictRequest,
                    com.google.cloud.automl.v1.PredictResponse>(service, METHODID_PREDICT)))
        .addMethod(
            getBatchPredictMethod(),
            io.grpc.stub.ServerCalls.asyncUnaryCall(
                new MethodHandlers<
                    com.google.cloud.automl.v1.BatchPredictRequest,
                    com.google.longrunning.Operation>(service, METHODID_BATCH_PREDICT)))
        .build();
  }

  private abstract static class PredictionServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoFileDescriptorSupplier,
          io.grpc.protobuf.ProtoServiceDescriptorSupplier {
    PredictionServiceBaseDescriptorSupplier() {}

    @java.lang.Override
    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
      return com.google.cloud.automl.v1.PredictionServiceProto.getDescriptor();
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
      return getFileDescriptor().findServiceByName("PredictionService");
    }
  }

  private static final class PredictionServiceFileDescriptorSupplier
      extends PredictionServiceBaseDescriptorSupplier {
    PredictionServiceFileDescriptorSupplier() {}
  }

  private static final class PredictionServiceMethodDescriptorSupplier
      extends PredictionServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
    private final String methodName;

    PredictionServiceMethodDescriptorSupplier(String methodName) {
      this.methodName = methodName;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
      return getServiceDescriptor().findMethodByName(methodName);
    }
  }

  private static volatile io.grpc.ServiceDescriptor serviceDescriptor;

  public static io.grpc.ServiceDescriptor getServiceDescriptor() {
    io.grpc.ServiceDescriptor result = serviceDescriptor;
    if (result == null) {
      synchronized (PredictionServiceGrpc.class) {
        result = serviceDescriptor;
        if (result == null) {
          serviceDescriptor =
              result =
                  io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
                      .setSchemaDescriptor(new PredictionServiceFileDescriptorSupplier())
                      .addMethod(getPredictMethod())
                      .addMethod(getBatchPredictMethod())
                      .build();
        }
      }
    }
    return result;
  }
}
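// Server-side sketch (illustrative only, not part of the generated file): a service
// implementation extends PredictionServiceImplBase (or implements AsyncService) and is
// registered with a server; bindService() above wires the handlers. The port and the response
// value are placeholders, and start() throws java.io.IOException in real code.
//
//   io.grpc.Server server =
//       io.grpc.ServerBuilder.forPort(8080)
//           .addService(
//               new PredictionServiceGrpc.PredictionServiceImplBase() {
//                 @java.lang.Override
//                 public void predict(
//                     com.google.cloud.automl.v1.PredictRequest request,
//                     io.grpc.stub.StreamObserver<com.google.cloud.automl.v1.PredictResponse>
//                         responseObserver) {
//                   responseObserver.onNext(
//                       com.google.cloud.automl.v1.PredictResponse.getDefaultInstance());
//                   responseObserver.onCompleted();
//                 }
//               })
//           .build()
//           .start();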