/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/vision/v1/image_annotator.proto

package com.google.cloud.vision.v1;

public interface FaceAnnotationOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.vision.v1.FaceAnnotation)
    com.google.protobuf.MessageOrBuilder {

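  // Accessor conventions (standard protobuf Java codegen): message-typed
  // fields pair has<Field>() with get<Field>() and a get<Field>OrBuilder()
  // view; repeated fields expose list, count, and indexed accessors; enum
  // fields pair get<Field>() with get<Field>Value(), the raw wire integer.
  // A usage sketch follows the interface body below.
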
  /**
   *
   *
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly bounding_poly = 1;</code>
   *
   * @return Whether the boundingPoly field is set.
   */
  boolean hasBoundingPoly();
  /**
   *
   *
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly bounding_poly = 1;</code>
   *
   * @return The boundingPoly.
   */
  com.google.cloud.vision.v1.BoundingPoly getBoundingPoly();
  /**
   *
   *
   * <pre>
   * The bounding polygon around the face. The coordinates of the bounding box
   * are in the original image's scale.
   * The bounding box is computed to "frame" the face in accordance with human
   * expectations. It is based on the landmarker results.
   * Note that one or more x and/or y coordinates may not be generated in the
   * `BoundingPoly` (the polygon will be unbounded) if only a partial face
   * appears in the image to be annotated.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly bounding_poly = 1;</code>
   */
  com.google.cloud.vision.v1.BoundingPolyOrBuilder getBoundingPolyOrBuilder();

  /**
   *
   *
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly fd_bounding_poly = 2;</code>
   *
   * @return Whether the fdBoundingPoly field is set.
   */
  boolean hasFdBoundingPoly();
  /**
   *
   *
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly fd_bounding_poly = 2;</code>
   *
   * @return The fdBoundingPoly.
   */
  com.google.cloud.vision.v1.BoundingPoly getFdBoundingPoly();
  /**
   *
   *
   * <pre>
   * The `fd_bounding_poly` bounding polygon is tighter than the
   * `boundingPoly`, and encloses only the skin part of the face. Typically, it
   * is used to eliminate the face from any image analysis that detects the
   * "amount of skin" visible in an image. It is not based on the
   * landmarker results, only on the initial face detection, hence
   * the &lt;code&gt;fd&lt;/code&gt; (face detection) prefix.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.BoundingPoly fd_bounding_poly = 2;</code>
   */
  com.google.cloud.vision.v1.BoundingPolyOrBuilder getFdBoundingPolyOrBuilder();

  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  java.util.List<com.google.cloud.vision.v1.FaceAnnotation.Landmark> getLandmarksList();
  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  com.google.cloud.vision.v1.FaceAnnotation.Landmark getLandmarks(int index);
  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  int getLandmarksCount();
  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  java.util.List<? extends com.google.cloud.vision.v1.FaceAnnotation.LandmarkOrBuilder>
      getLandmarksOrBuilderList();
  /**
   *
   *
   * <pre>
   * Detected face landmarks.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1.FaceAnnotation.Landmark landmarks = 3;</code>
   */
  com.google.cloud.vision.v1.FaceAnnotation.LandmarkOrBuilder getLandmarksOrBuilder(int index);

  /**
   *
   *
   * <pre>
   * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
   * of the face relative to the image vertical about the axis perpendicular to
   * the face. Range [-180,180].
   * </pre>
   *
   * <code>float roll_angle = 4;</code>
   *
   * @return The rollAngle.
   */
  float getRollAngle();

  /**
   *
   *
   * <pre>
   * Yaw angle, which indicates the leftward/rightward angle that the face is
   * pointing relative to the vertical plane perpendicular to the image. Range
   * [-180,180].
   * </pre>
   *
   * <code>float pan_angle = 5;</code>
   *
   * @return The panAngle.
   */
  float getPanAngle();

  /**
   *
   *
   * <pre>
   * Pitch angle, which indicates the upwards/downwards angle that the face is
   * pointing relative to the image's horizontal plane. Range [-180,180].
   * </pre>
   *
   * <code>float tilt_angle = 6;</code>
   *
   * @return The tiltAngle.
   */
  float getTiltAngle();

  /**
   *
   *
   * <pre>
   * Detection confidence. Range [0, 1].
   * </pre>
   *
   * <code>float detection_confidence = 7;</code>
   *
   * @return The detectionConfidence.
   */
  float getDetectionConfidence();

  /**
   *
   *
   * <pre>
   * Face landmarking confidence. Range [0, 1].
   * </pre>
   *
   * <code>float landmarking_confidence = 8;</code>
   *
   * @return The landmarkingConfidence.
   */
  float getLandmarkingConfidence();

  /**
   *
   *
   * <pre>
   * Joy likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood joy_likelihood = 9;</code>
   *
   * @return The enum numeric value on the wire for joyLikelihood.
   */
  int getJoyLikelihoodValue();
  /**
   *
   *
   * <pre>
   * Joy likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood joy_likelihood = 9;</code>
   *
   * @return The joyLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getJoyLikelihood();

  /**
   *
   *
   * <pre>
   * Sorrow likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood sorrow_likelihood = 10;</code>
   *
   * @return The enum numeric value on the wire for sorrowLikelihood.
   */
  int getSorrowLikelihoodValue();
  /**
   *
   *
   * <pre>
   * Sorrow likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood sorrow_likelihood = 10;</code>
   *
   * @return The sorrowLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getSorrowLikelihood();

  /**
   *
   *
   * <pre>
   * Anger likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood anger_likelihood = 11;</code>
   *
   * @return The enum numeric value on the wire for angerLikelihood.
   */
  int getAngerLikelihoodValue();
  /**
   *
   *
   * <pre>
   * Anger likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood anger_likelihood = 11;</code>
   *
   * @return The angerLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getAngerLikelihood();

  /**
   *
   *
   * <pre>
   * Surprise likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood surprise_likelihood = 12;</code>
   *
   * @return The enum numeric value on the wire for surpriseLikelihood.
   */
  int getSurpriseLikelihoodValue();
  /**
   *
   *
   * <pre>
   * Surprise likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood surprise_likelihood = 12;</code>
   *
   * @return The surpriseLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getSurpriseLikelihood();

  /**
   *
   *
   * <pre>
   * Under-exposed likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood under_exposed_likelihood = 13;</code>
   *
   * @return The enum numeric value on the wire for underExposedLikelihood.
   */
  int getUnderExposedLikelihoodValue();
  /**
   *
   *
   * <pre>
   * Under-exposed likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood under_exposed_likelihood = 13;</code>
   *
   * @return The underExposedLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getUnderExposedLikelihood();

  /**
   *
   *
   * <pre>
   * Blurred likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood blurred_likelihood = 14;</code>
   *
   * @return The enum numeric value on the wire for blurredLikelihood.
   */
  int getBlurredLikelihoodValue();
  /**
   *
   *
   * <pre>
   * Blurred likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood blurred_likelihood = 14;</code>
   *
   * @return The blurredLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getBlurredLikelihood();

  /**
   *
   *
   * <pre>
   * Headwear likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood headwear_likelihood = 15;</code>
   *
   * @return The enum numeric value on the wire for headwearLikelihood.
   */
  int getHeadwearLikelihoodValue();
  /**
   *
   *
   * <pre>
   * Headwear likelihood.
   * </pre>
   *
   * <code>.google.cloud.vision.v1.Likelihood headwear_likelihood = 15;</code>
   *
   * @return The headwearLikelihood.
   */
  com.google.cloud.vision.v1.Likelihood getHeadwearLikelihood();
}
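
// ---------------------------------------------------------------------------
// Minimal usage sketch (not part of the generated interface above). It assumes
// a FaceAnnotation obtained elsewhere, e.g. from an AnnotateImageResponse, and
// calls only methods declared on FaceAnnotationOrBuilder. The class and method
// names below are hypothetical, chosen for illustration.
// ---------------------------------------------------------------------------
final class FaceAnnotationOrBuilderUsageSketch {

  static void describe(FaceAnnotationOrBuilder face) {
    // Message-typed fields: check has...() first; get...() would otherwise
    // return the default (empty) instance rather than null.
    if (face.hasBoundingPoly()) {
      System.out.println("boundingPoly: " + face.getBoundingPoly());
    }
    if (face.hasFdBoundingPoly()) {
      System.out.println("fdBoundingPoly: " + face.getFdBoundingPoly());
    }

    // Repeated field: iterate via the count plus the indexed getter
    // (getLandmarksList() offers an equivalent List view).
    for (int i = 0; i < face.getLandmarksCount(); i++) {
      System.out.println("landmark[" + i + "]: " + face.getLandmarks(i));
    }

    // Plain float fields: rotation angles (range [-180,180]) and confidences
    // (range [0,1]), per the field comments above.
    System.out.printf(
        "roll=%.1f pan=%.1f tilt=%.1f detection=%.2f landmarking=%.2f%n",
        face.getRollAngle(),
        face.getPanAngle(),
        face.getTiltAngle(),
        face.getDetectionConfidence(),
        face.getLandmarkingConfidence());

    // Enum fields come in pairs: the Likelihood enum, and the raw wire integer
    // via get...Value(), which survives values unknown to this client version.
    System.out.println(
        "joy=" + face.getJoyLikelihood()
            + " (wire=" + face.getJoyLikelihoodValue() + ")");
    System.out.println(
        "headwear=" + face.getHeadwearLikelihood()
            + " (wire=" + face.getHeadwearLikelihoodValue() + ")");
  }
}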