/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.speech.v1p1beta1;

import com.google.api.core.BetaApi;
import com.google.api.gax.core.BackgroundResource;
import com.google.api.gax.httpjson.longrunning.OperationsClient;
import com.google.api.gax.longrunning.OperationFuture;
import com.google.api.gax.rpc.BidiStreamingCallable;
import com.google.api.gax.rpc.OperationCallable;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.speech.v1p1beta1.stub.SpeechStub;
import com.google.cloud.speech.v1p1beta1.stub.SpeechStubSettings;
import com.google.longrunning.Operation;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Generated;

// AUTO-GENERATED DOCUMENTATION AND CLASS.
/**
 * Service Description: Service that implements Google Cloud Speech API.
 *
 * <p>This class provides the ability to make remote calls to the backing service through method
 * calls that map to API methods. Sample code to get started:
 *
 * <pre>{@code
 * // This snippet has been automatically generated and should be regarded as a code template only.
 * // It will require modifications to work:
 * // - It may require correct/in-range values for request initialization.
 * // - It may require specifying regional endpoints when creating the service client as shown in
 * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
 * try (SpeechClient speechClient = SpeechClient.create()) {
 *   RecognitionConfig config = RecognitionConfig.newBuilder().build();
 *   RecognitionAudio audio = RecognitionAudio.newBuilder().build();
 *   RecognizeResponse response = speechClient.recognize(config, audio);
 * }
 * }</pre>
 *
 * <p>Note: close() needs to be called on the SpeechClient object to clean up resources such as
 * threads. In the example above, try-with-resources is used, which automatically calls close().
 *
 * <p>The surface of this class includes several types of Java methods for each of the API's
 * methods:
 *
 * <ol>
 *   <li>A "flattened" method. With this type of method, the fields of the request type have been
 *       converted into function parameters. It may be the case that not all fields are available as
 *       parameters, and not every API method will have a flattened method entry point.
 *   <li>A "request object" method. This type of method only takes one parameter, a request object,
 *       which must be constructed before the call. Not every API method will have a request object
 *       method.
 *   <li>A "callable" method. This type of method takes no parameters and returns an immutable API
 *       callable object, which can be used to initiate calls to the service.
 * </ol>
 *
 * <p>See the individual methods for example code.
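 *
 * <p>As a hand-written illustration (not one of the generated samples), the recognize call shown
 * above can also be made through the "request object" and "callable" variants; the empty builders
 * are placeholders that need real values:
 *
 * <pre>{@code
 * try (SpeechClient speechClient = SpeechClient.create()) {
 *   RecognizeRequest request =
 *       RecognizeRequest.newBuilder()
 *           .setConfig(RecognitionConfig.newBuilder().build())
 *           .setAudio(RecognitionAudio.newBuilder().build())
 *           .build();
 *   // "Request object" variant.
 *   RecognizeResponse response = speechClient.recognize(request);
 *   // "Callable" variant, returning a future.
 *   ApiFuture<RecognizeResponse> future = speechClient.recognizeCallable().futureCall(request);
 *   RecognizeResponse sameResponse = future.get();
 * }
 * }</pre>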
 *
 * <p>Many parameters require resource names to be formatted in a particular way. To assist with
 * these names, this class includes a format method for each type of name, and additionally a parse
 * method to extract the individual identifiers contained within names that are returned.
 *
 * <p>This class can be customized by passing in a custom instance of SpeechSettings to create().
 * For example:
 *
 * <p>To customize credentials:
 *
 * <pre>{@code
 * // This snippet has been automatically generated and should be regarded as a code template only.
 * // It will require modifications to work:
 * // - It may require correct/in-range values for request initialization.
 * // - It may require specifying regional endpoints when creating the service client as shown in
 * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
 * SpeechSettings speechSettings =
 *     SpeechSettings.newBuilder()
 *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
 *         .build();
 * SpeechClient speechClient = SpeechClient.create(speechSettings);
 * }</pre>
 *
 * <p>To customize the endpoint:
 *
 * <pre>{@code
 * // This snippet has been automatically generated and should be regarded as a code template only.
 * // It will require modifications to work:
 * // - It may require correct/in-range values for request initialization.
 * // - It may require specifying regional endpoints when creating the service client as shown in
 * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
 * SpeechSettings speechSettings = SpeechSettings.newBuilder().setEndpoint(myEndpoint).build();
 * SpeechClient speechClient = SpeechClient.create(speechSettings);
 * }</pre>
 *
 * <p>To use REST (HTTP1.1/JSON) transport (instead of gRPC) for sending and receiving requests over
 * the wire:
 *
 * <pre>{@code
 * // This snippet has been automatically generated and should be regarded as a code template only.
 * // It will require modifications to work:
 * // - It may require correct/in-range values for request initialization.
 * // - It may require specifying regional endpoints when creating the service client as shown in
 * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
 * SpeechSettings speechSettings = SpeechSettings.newHttpJsonBuilder().build();
 * SpeechClient speechClient = SpeechClient.create(speechSettings);
 * }</pre>
 *
 * <p>Please refer to the GitHub repository's samples for more quickstart code snippets.
 */
@BetaApi
@Generated("by gapic-generator-java")
public class SpeechClient implements BackgroundResource {
  private final SpeechSettings settings;
  private final SpeechStub stub;
  private final OperationsClient httpJsonOperationsClient;
  private final com.google.longrunning.OperationsClient operationsClient;

  /** Constructs an instance of SpeechClient with default settings. */
  public static final SpeechClient create() throws IOException {
    return create(SpeechSettings.newBuilder().build());
  }

  /**
   * Constructs an instance of SpeechClient, using the given settings. The channels are created
   * based on the settings passed in, or defaults for any settings that are not set.
   */
  public static final SpeechClient create(SpeechSettings settings) throws IOException {
    return new SpeechClient(settings);
  }

  /**
   * Constructs an instance of SpeechClient, using the given stub for making calls. This is for
   * advanced usage - prefer using create(SpeechSettings).
   */
  public static final SpeechClient create(SpeechStub stub) {
    return new SpeechClient(stub);
  }

  /**
   * Constructs an instance of SpeechClient, using the given settings. This is protected so that it
   * is easy to make a subclass, but otherwise, the static factory methods should be preferred.
   */
  protected SpeechClient(SpeechSettings settings) throws IOException {
    this.settings = settings;
    this.stub = ((SpeechStubSettings) settings.getStubSettings()).createStub();
    this.operationsClient =
        com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub());
    this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub());
  }

  protected SpeechClient(SpeechStub stub) {
    this.settings = null;
    this.stub = stub;
    this.operationsClient =
        com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub());
    this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub());
  }

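  /**
   * Returns the settings used to construct this client, or null if this client was created
   * directly from a SpeechStub via create(SpeechStub).
   */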
  public final SpeechSettings getSettings() {
    return settings;
  }

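  /** Returns the underlying transport stub that this client uses to issue calls. */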
  public SpeechStub getStub() {
    return stub;
  }

  /**
   * Returns the OperationsClient that can be used to query the status of a long-running operation
   * returned by another API method call.
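   *
   * <p>A hand-written sketch (not part of the generated documentation) of checking an operation's
   * status by name; {@code request} stands in for a previously built LongRunningRecognizeRequest:
   *
   * <pre>{@code
   * Operation started = speechClient.longRunningRecognizeCallable().call(request);
   * Operation current = speechClient.getOperationsClient().getOperation(started.getName());
   * if (current.getDone()) {
   *   // On success, current.getResponse() packs a LongRunningRecognizeResponse.
   * }
   * }</pre>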
   */
  public final com.google.longrunning.OperationsClient getOperationsClient() {
    return operationsClient;
  }

  /**
   * Returns the OperationsClient that can be used to query the status of a long-running operation
   * returned by another API method call.
   */
  @BetaApi
  public final OperationsClient getHttpJsonOperationsClient() {
    return httpJsonOperationsClient;
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs synchronous speech recognition: receive results after all audio has been sent and
   * processed.
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   RecognitionConfig config = RecognitionConfig.newBuilder().build();
   *   RecognitionAudio audio = RecognitionAudio.newBuilder().build();
   *   RecognizeResponse response = speechClient.recognize(config, audio);
   * }
   * }</pre>
   *
   * @param config Required. Provides information to the recognizer that specifies how to process
   *     the request.
   * @param audio Required. The audio data to be recognized.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final RecognizeResponse recognize(RecognitionConfig config, RecognitionAudio audio) {
    RecognizeRequest request =
        RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
    return recognize(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs synchronous speech recognition: receive results after all audio has been sent and
   * processed.
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   RecognizeRequest request =
   *       RecognizeRequest.newBuilder()
   *           .setConfig(RecognitionConfig.newBuilder().build())
   *           .setAudio(RecognitionAudio.newBuilder().build())
   *           .build();
   *   RecognizeResponse response = speechClient.recognize(request);
   * }
   * }</pre>
   *
   * @param request The request object containing all of the parameters for the API call.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final RecognizeResponse recognize(RecognizeRequest request) {
    return recognizeCallable().call(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs synchronous speech recognition: receive results after all audio has been sent and
   * processed.
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   RecognizeRequest request =
   *       RecognizeRequest.newBuilder()
   *           .setConfig(RecognitionConfig.newBuilder().build())
   *           .setAudio(RecognitionAudio.newBuilder().build())
   *           .build();
   *   ApiFuture<RecognizeResponse> future = speechClient.recognizeCallable().futureCall(request);
   *   // Do something.
   *   RecognizeResponse response = future.get();
   * }
   * }</pre>
   */
  public final UnaryCallable<RecognizeRequest, RecognizeResponse> recognizeCallable() {
    return stub.recognizeCallable();
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations
   * interface. Returns either an `Operation.error` or an `Operation.response` which contains a
   * `LongRunningRecognizeResponse` message. For more information on asynchronous speech
   * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   RecognitionConfig config = RecognitionConfig.newBuilder().build();
   *   RecognitionAudio audio = RecognitionAudio.newBuilder().build();
   *   LongRunningRecognizeResponse response =
   *       speechClient.longRunningRecognizeAsync(config, audio).get();
   * }
   * }</pre>
   *
   * @param config Required. Provides information to the recognizer that specifies how to process
   *     the request.
   * @param audio Required. The audio data to be recognized.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata>
      longRunningRecognizeAsync(RecognitionConfig config, RecognitionAudio audio) {
    LongRunningRecognizeRequest request =
        LongRunningRecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build();
    return longRunningRecognizeAsync(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations
   * interface. Returns either an `Operation.error` or an `Operation.response` which contains a
   * `LongRunningRecognizeResponse` message. For more information on asynchronous speech
   * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   LongRunningRecognizeRequest request =
   *       LongRunningRecognizeRequest.newBuilder()
   *           .setConfig(RecognitionConfig.newBuilder().build())
   *           .setAudio(RecognitionAudio.newBuilder().build())
   *           .setOutputConfig(TranscriptOutputConfig.newBuilder().build())
   *           .build();
   *   LongRunningRecognizeResponse response = speechClient.longRunningRecognizeAsync(request).get();
   * }
   * }</pre>
   *
   * @param request The request object containing all of the parameters for the API call.
   * @throws com.google.api.gax.rpc.ApiException if the remote call fails
   */
  public final OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata>
      longRunningRecognizeAsync(LongRunningRecognizeRequest request) {
    return longRunningRecognizeOperationCallable().futureCall(request);
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations
   * interface. Returns either an `Operation.error` or an `Operation.response` which contains a
   * `LongRunningRecognizeResponse` message. For more information on asynchronous speech
   * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   LongRunningRecognizeRequest request =
   *       LongRunningRecognizeRequest.newBuilder()
   *           .setConfig(RecognitionConfig.newBuilder().build())
   *           .setAudio(RecognitionAudio.newBuilder().build())
   *           .setOutputConfig(TranscriptOutputConfig.newBuilder().build())
   *           .build();
   *   OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
   *       speechClient.longRunningRecognizeOperationCallable().futureCall(request);
   *   // Do something.
   *   LongRunningRecognizeResponse response = future.get();
   * }
   * }</pre>
   */
  public final OperationCallable<
          LongRunningRecognizeRequest, LongRunningRecognizeResponse, LongRunningRecognizeMetadata>
      longRunningRecognizeOperationCallable() {
    return stub.longRunningRecognizeOperationCallable();
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs asynchronous speech recognition: receive results via the google.longrunning.Operations
   * interface. Returns either an `Operation.error` or an `Operation.response` which contains a
   * `LongRunningRecognizeResponse` message. For more information on asynchronous speech
   * recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   LongRunningRecognizeRequest request =
   *       LongRunningRecognizeRequest.newBuilder()
   *           .setConfig(RecognitionConfig.newBuilder().build())
   *           .setAudio(RecognitionAudio.newBuilder().build())
   *           .setOutputConfig(TranscriptOutputConfig.newBuilder().build())
   *           .build();
   *   ApiFuture<Operation> future = speechClient.longRunningRecognizeCallable().futureCall(request);
   *   // Do something.
   *   Operation response = future.get();
   * }
   * }</pre>
   */
  public final UnaryCallable<LongRunningRecognizeRequest, Operation>
      longRunningRecognizeCallable() {
    return stub.longRunningRecognizeCallable();
  }

  // AUTO-GENERATED DOCUMENTATION AND METHOD.
  /**
   * Performs bidirectional streaming speech recognition: receive results while sending audio. This
   * method is only available via the gRPC API (not REST).
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * // This snippet has been automatically generated and should be regarded as a code template only.
   * // It will require modifications to work:
   * // - It may require correct/in-range values for request initialization.
   * // - It may require specifying regional endpoints when creating the service client as shown in
   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
   * try (SpeechClient speechClient = SpeechClient.create()) {
   *   BidiStream<StreamingRecognizeRequest, StreamingRecognizeResponse> bidiStream =
   *       speechClient.streamingRecognizeCallable().call();
   *   StreamingRecognizeRequest request = StreamingRecognizeRequest.newBuilder().build();
   *   bidiStream.send(request);
   *   for (StreamingRecognizeResponse response : bidiStream) {
   *     // Do something when a response is received.
   *   }
   * }
   * }</pre>
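   *
   * <p>A hand-written note (not generated): in practice the first request on the stream carries
   * only the streaming configuration, later requests carry audio, and the send side is closed when
   * the audio ends; {@code audioChunk} below is a placeholder for a ByteString of audio data:
   *
   * <pre>{@code
   * bidiStream.send(
   *     StreamingRecognizeRequest.newBuilder()
   *         .setStreamingConfig(StreamingRecognitionConfig.newBuilder().build())
   *         .build());
   * bidiStream.send(
   *     StreamingRecognizeRequest.newBuilder().setAudioContent(audioChunk).build());
   * bidiStream.closeSend();
   * }</pre>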
   */
  public final BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse>
      streamingRecognizeCallable() {
    return stub.streamingRecognizeCallable();
  }

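  /** Closes the underlying stub and releases its background resources. */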
  @Override
  public final void close() {
    stub.close();
  }

  @Override
  public void shutdown() {
    stub.shutdown();
  }

  @Override
  public boolean isShutdown() {
    return stub.isShutdown();
  }

  @Override
  public boolean isTerminated() {
    return stub.isTerminated();
  }

  @Override
  public void shutdownNow() {
    stub.shutdownNow();
  }

  @Override
  public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
    return stub.awaitTermination(duration, unit);
  }
}