• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.graphics;
18 
19 import android.annotation.IntDef;
20 
21 import java.lang.annotation.Retention;
22 import java.lang.annotation.RetentionPolicy;
23 
24 public class ImageFormat {
25      /** @hide */
26      @Retention(RetentionPolicy.SOURCE)
27      @IntDef(value = {
28              UNKNOWN,
29              RGB_565,
30              YV12,
31              Y8,
32              Y16,
33              NV16,
34              NV21,
35              YUY2,
36              JPEG,
37              DEPTH_JPEG,
38              YUV_420_888,
39              YUV_422_888,
40              YUV_444_888,
41              FLEX_RGB_888,
42              FLEX_RGBA_8888,
43              RAW_SENSOR,
44              RAW_PRIVATE,
45              RAW10,
46              RAW12,
47              DEPTH16,
48              DEPTH_POINT_CLOUD,
49              RAW_DEPTH,
50              RAW_DEPTH10,
51              PRIVATE,
52              HEIC
53      })
54      public @interface Format {
55      }
56 
    /*
     * these constants are chosen to be binary compatible with their previous
     * location in PixelFormat.java
     */

    /**
     * Unknown or unspecified image format. Used when the producer of a buffer
     * does not declare a concrete pixel layout.
     */
    public static final int UNKNOWN = 0;

    /**
     * RGB format used for pictures encoded as RGB_565. See
     * {@link android.hardware.Camera.Parameters#setPictureFormat(int)}.
     */
    public static final int RGB_565 = 4;
69 
    /**
     * <p>Android YUV format.</p>
     *
     * <p>This format is exposed to software decoders and applications.</p>
     *
     * <p>YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
     * by (W/2) x (H/2) Cr and Cb planes.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * <li>a vertical stride equal to the height</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height
     * c_stride = ALIGN(stride/2, 16)
     * c_size = c_stride * height/2
     * size = y_size + c_size * 2
     * cr_offset = y_size
     * cb_offset = y_size + c_size</pre>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>For the older camera API, this format is guaranteed to be supported for
     * {@link android.hardware.Camera} preview images since API level 12; for earlier API versions,
     * check {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     *
     * <p>Note that for camera preview callback use (see
     * {@link android.hardware.Camera#setPreviewCallback}), the
     * <var>stride</var> value is the smallest possible; that is, it is equal
     * to:</p>
     *
     * <pre>stride = ALIGN(width, 16)</pre>
     *
     * @see android.hardware.Camera.Parameters#setPreviewCallback
     * @see android.hardware.Camera.Parameters#setPreviewFormat
     * @see android.hardware.Camera.Parameters#getSupportedPreviewFormats
     */
    public static final int YV12 = 0x32315659;
114 
    /**
     * <p>Android Y8 format.</p>
     *
     * <p>Y8 is a YUV planar format comprised of a WxH Y plane only, with each pixel
     * being represented by 8 bits. It is equivalent to just the Y plane from {@link #YV12}
     * format.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int Y8 = 0x20203859;

    /**
     * <p>Android Y16 format.</p>
     *
     * <p>Y16 is a YUV planar format comprised of a WxH Y plane, with each pixel
     * being represented by 16 bits. It is just like {@link #Y8}, but has 16
     * bits per pixel (little endian).</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     *
     * @hide
     */
    public static final int Y16 = 0x20363159;
176 
    /**
     * <p>Android YUV P010 format.</p>
     *
     * <p>P010 is a 4:2:0 YCbCr semiplanar format comprised of a WxH Y plane
     * followed immediately by a Wx(H/2) CbCr plane. Each sample is
     * represented by a 16-bit little-endian value, with the lower 6 bits set
     * to zero.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even height</li>
     * <li>a vertical stride equal to the height</li>
     * </ul>
     * </p>
     *
     * <pre>   stride_in_bytes = stride * 2 </pre>
     * <pre>   y_size = stride_in_bytes * height </pre>
     * <pre>   cbcr_size = stride_in_bytes * (height / 2) </pre>
     * <pre>   cb_offset = y_size </pre>
     * <pre>   cr_offset = cb_offset + 2 </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int YCBCR_P010 = 0x36;
209 
    /**
     * YCbCr format, used for video.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>Whether this format is supported by the old camera API can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     */
    public static final int NV16 = 0x10;

    /**
     * YCrCb format used for images, which uses the NV21 encoding format.
     *
     * <p>This is the default format
     * for {@link android.hardware.Camera} preview images, when not otherwise set with
     * {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}.</p>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     */
    public static final int NV21 = 0x11;

    /**
     * YCbCr format used for images, which uses YUYV (YUY2) encoding format.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>This is an alternative format for {@link android.hardware.Camera} preview images. Whether
     * this format is supported by the camera hardware can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     */
    public static final int YUY2 = 0x14;

    /**
     * Compressed JPEG format.
     *
     * <p>This format is always supported as an output format for the
     * {@link android.hardware.camera2} API, and as a picture format for the older
     * {@link android.hardware.Camera} API.</p>
     */
    public static final int JPEG = 0x100;

    /**
     * Depth augmented compressed JPEG format.
     *
     * <p>JPEG compressed main image along with XMP embedded depth metadata
     * following ISO 16684-1:2011(E).</p>
     */
    public static final int DEPTH_JPEG = 0x69656963;
262 
    /**
     * <p>Multi-plane Android YUV 420 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:0
     * chroma-subsampled planar or semiplanar buffer (but not fully interleaved),
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>The Y-plane is guaranteed not to be interleaved with the U/V planes
     * (in particular, pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}).</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int YUV_420_888 = 0x23;

    /**
     * <p>Multi-plane Android YUV 422 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:2
     * chroma-subsampled (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_422_888 = 0x27;

    /**
     * <p>Multi-plane Android YUV 444 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:4:4
     * (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_444_888 = 0x28;
374 
    /**
     * <p>Multi-plane Android RGB format</p>
     *
     * <p>This format is a generic RGB format, capable of describing most RGB formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), and plane #2 is always B
     * (blue).</p>
     *
     * <p>All three planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     * @see #FLEX_RGBA_8888
     */
    public static final int FLEX_RGB_888 = 0x29;

    /**
     * <p>Multi-plane Android RGBA format</p>
     *
     * <p>This format is a generic RGBA format, capable of describing most RGBA formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by four separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), plane #2 is always B (blue),
     * and plane #3 is always A (alpha). This format may represent pre-multiplied or
     * non-premultiplied alpha.</p>
     *
     * <p>All four planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     * @see #FLEX_RGB_888
     */
    public static final int FLEX_RGBA_8888 = 0x2A;
429 
    /**
     * <p>General raw camera sensor image format, usually representing a
     * single-channel Bayer-mosaic image. Each pixel color sample is stored with
     * 16 bits of precision.</p>
     *
     * <p>The layout of the color mosaic, the maximum and minimum encoding
     * values of the raw pixel data, the color space of the image, and all other
     * needed information to interpret a raw sensor image must be queried from
     * the {@link android.hardware.camera2.CameraDevice} which produced the
     * image.</p>
     */
    public static final int RAW_SENSOR = 0x20;

    /**
     * <p>Private raw camera sensor image format, a single channel image with
     * implementation dependent pixel layout.</p>
     *
     * <p>RAW_PRIVATE is a format for unprocessed raw image buffers coming from an
     * image sensor. The actual structure of buffers of this format is
     * implementation-dependent.</p>
     */
    public static final int RAW_PRIVATE = 0x24;
453 
    /**
     * <p>
     * Android 10-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 10-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of
     * each row, each 4 consecutive pixels are packed into 5 bytes (40 bits).
     * Each one of the first 4 bytes contains the top 8 bits of each pixel. The
     * fifth byte contains the 2 least significant bits of the 4 pixels, the
     * exact layout data for each 4 consecutive pixels is illustrated below
     * ({@code Pi[j]} stands for the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[9]</td>
     * <td align="center">P0[8]</td>
     * <td align="center">P0[7]</td>
     * <td align="center">P0[6]</td>
     * <td align="center">P0[5]</td>
     * <td align="center">P0[4]</td>
     * <td align="center">P0[3]</td>
     * <td align="center">P0[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[9]</td>
     * <td align="center">P1[8]</td>
     * <td align="center">P1[7]</td>
     * <td align="center">P1[6]</td>
     * <td align="center">P1[5]</td>
     * <td align="center">P1[4]</td>
     * <td align="center">P1[3]</td>
     * <td align="center">P1[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P2[9]</td>
     * <td align="center">P2[8]</td>
     * <td align="center">P2[7]</td>
     * <td align="center">P2[6]</td>
     * <td align="center">P2[5]</td>
     * <td align="center">P2[4]</td>
     * <td align="center">P2[3]</td>
     * <td align="center">P2[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 3:</td>
     * <td align="center">P3[9]</td>
     * <td align="center">P3[8]</td>
     * <td align="center">P3[7]</td>
     * <td align="center">P3[6]</td>
     * <td align="center">P3[5]</td>
     * <td align="center">P3[4]</td>
     * <td align="center">P3[3]</td>
     * <td align="center">P3[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 4:</td>
     * <td align="center">P3[1]</td>
     * <td align="center">P3[0]</td>
     * <td align="center">P2[1]</td>
     * <td align="center">P2[0]</td>
     * <td align="center">P1[1]</td>
     * <td align="center">P1[0]</td>
     * <td align="center">P0[1]</td>
     * <td align="center">P0[0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (10 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (10 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW10 = 0x25;
577 
    /**
     * <p>
     * Android 12-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 12-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of each
     * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
     * and second bytes contain the top 8 bits of the first and second pixels. The third
     * byte contains the 4 least significant bits of the two pixels, the exact layout
     * data for each two consecutive pixels is illustrated below ({@code Pi[j]} stands for
     * the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[11]</td>
     * <td align="center">P0[10]</td>
     * <td align="center">P0[ 9]</td>
     * <td align="center">P0[ 8]</td>
     * <td align="center">P0[ 7]</td>
     * <td align="center">P0[ 6]</td>
     * <td align="center">P0[ 5]</td>
     * <td align="center">P0[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[11]</td>
     * <td align="center">P1[10]</td>
     * <td align="center">P1[ 9]</td>
     * <td align="center">P1[ 8]</td>
     * <td align="center">P1[ 7]</td>
     * <td align="center">P1[ 6]</td>
     * <td align="center">P1[ 5]</td>
     * <td align="center">P1[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P1[ 3]</td>
     * <td align="center">P1[ 2]</td>
     * <td align="center">P1[ 1]</td>
     * <td align="center">P1[ 0]</td>
     * <td align="center">P0[ 3]</td>
     * <td align="center">P0[ 2]</td>
     * <td align="center">P0[ 1]</td>
     * <td align="center">P0[ 0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (12 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (12 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW12 = 0x26;
679 
    /**
     * <p>Android dense depth image format.</p>
     *
     * <p>Each pixel is 16 bits, representing a depth ranging measurement from a depth camera or
     * similar sensor. The 16-bit sample consists of a confidence value and the actual ranging
     * measurement.</p>
     *
     * <p>The confidence value is an estimate of correctness for this sample.  It is encoded in the
     * 3 most significant bits of the sample, with a value of 0 representing 100% confidence, a
     * value of 1 representing 0% confidence, a value of 2 representing 1/7, a value of 3
     * representing 2/7, and so on.</p>
     *
     * <p>As an example, the following sample extracts the range and confidence from the first pixel
     * of a DEPTH16-format {@link android.media.Image}, and converts the confidence to a
     * floating-point value between 0 and 1.f inclusive, with 1.f representing maximum confidence:
     *
     * <pre>
     *    ShortBuffer shortDepthBuffer = img.getPlanes()[0].getBuffer().asShortBuffer();
     *    short depthSample = shortDepthBuffer.get();
     *    short depthRange = (short) (depthSample &amp; 0x1FFF);
     *    short depthConfidence = (short) ((depthSample &gt;&gt; 13) &amp; 0x7);
     *    float depthPercentage = depthConfidence == 0 ? 1.f : (depthConfidence - 1) / 7.f;
     * </pre>
     * </p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * When produced by a camera, the units for the range are millimeters.
     */
    public static final int DEPTH16 = 0x44363159;
718 
    /**
     * Android sparse depth point cloud format.
     *
     * <p>A variable-length list of 3D points plus a confidence value, with each point represented
     * by four floats; first the X, Y, Z position coordinates, and then the confidence value.</p>
     *
     * <p>The number of points is {@code (size of the buffer in bytes) / 16}.</p>
     *
     * <p>The coordinate system and units of the position values depend on the source of the point
     * cloud data. The confidence value is between 0.f and 1.f, inclusive, with 0 representing 0%
     * confidence and 1.f representing 100% confidence in the measured position values.</p>
     *
     * <p>As an example, the following code extracts the first depth point in a DEPTH_POINT_CLOUD
     * format {@link android.media.Image}:
     * <pre>
     *    FloatBuffer floatDepthBuffer = img.getPlanes()[0].getBuffer().asFloatBuffer();
     *    float x = floatDepthBuffer.get();
     *    float y = floatDepthBuffer.get();
     *    float z = floatDepthBuffer.get();
     *    float confidence = floatDepthBuffer.get();
     * </pre>
     * </p>
     *
     * <p>For camera devices that support the
     * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT DEPTH_OUTPUT}
     * capability, DEPTH_POINT_CLOUD coordinates have units of meters, and the coordinate system is
     * defined by the camera's pose transforms:
     * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_TRANSLATION} and
     * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_ROTATION}. That means the origin is
     * the optical center of the camera device, and the positive Z axis points along the camera's optical axis,
     * toward the scene.</p>
     */
    public static final int DEPTH_POINT_CLOUD = 0x101;
751 
    /**
     * Unprocessed implementation-dependent raw depth measurements, opaque
     * with 16 bit samples.
     *
     * <p>The sample layout and measurement semantics are device specific;
     * buffers of this format are not intended to be interpreted directly by
     * applications.</p>
     *
     * @hide
     */
    public static final int RAW_DEPTH = 0x1002;
760 
    /**
     * Unprocessed implementation-dependent raw depth measurements, opaque
     * with 10 bit samples and a device specific bit layout.
     *
     * @hide
     */
    public static final int RAW_DEPTH10 = 0x1003;
769 
    /**
     * Android private opaque image format.
     * <p>
     * The choices of the actual format and pixel data layout are entirely up to
     * the device-specific and framework internal implementations, and may vary
     * depending on use cases even for the same device. The buffers of this
     * format can be produced by components like
     * {@link android.media.ImageWriter ImageWriter}, and interpreted correctly
     * by consumers like {@link android.hardware.camera2.CameraDevice
     * CameraDevice} based on the device/framework private information. However,
     * these buffers are not directly accessible to the application.
     * </p>
     * <p>
     * When an {@link android.media.Image Image} of this format is obtained from
     * an {@link android.media.ImageReader ImageReader} or
     * {@link android.media.ImageWriter ImageWriter}, the
     * {@link android.media.Image#getPlanes() getPlanes()} method will return an
     * empty {@link android.media.Image.Plane Plane} array.
     * </p>
     * <p>
     * If a buffer of this format is to be used as an OpenGL ES texture, the
     * framework will assume that sampling the texture will always return an
     * alpha value of 1.0 (i.e. the buffer contains only opaque pixel values).
     * </p>
     */
    public static final int PRIVATE = 0x22;
796 
    /**
     * Compressed HEIC format.
     *
     * <p>This format defines the HEIC brand of High Efficiency Image File
     * Format as described in ISO/IEC 23008-12.</p>
     *
     * <p>The constant's value 0x48454946 is the four-character code
     * {@code 'HEIF'} read as big-endian ASCII bytes.</p>
     */
    public static final int HEIC = 0x48454946;
804 
805     /**
806      * Use this function to retrieve the number of bits per pixel of an
807      * ImageFormat.
808      *
809      * @param format
810      * @return the number of bits per pixel of the given format or -1 if the
811      *         format doesn't exist or is not supported.
812      */
getBitsPerPixel(@ormat int format)813     public static int getBitsPerPixel(@Format int format) {
814         switch (format) {
815             case RGB_565:
816                 return 16;
817             case NV16:
818                 return 16;
819             case YUY2:
820                 return 16;
821             case YV12:
822                 return 12;
823             case Y8:
824                 return 8;
825             case Y16:
826             case DEPTH16:
827                 return 16;
828             case NV21:
829                 return 12;
830             case YUV_420_888:
831                 return 12;
832             case YUV_422_888:
833                 return 16;
834             case YUV_444_888:
835                 return 24;
836             case FLEX_RGB_888:
837                 return 24;
838             case FLEX_RGBA_8888:
839                 return 32;
840             case RAW_DEPTH:
841             case RAW_SENSOR:
842                 return 16;
843             case YCBCR_P010:
844                 return 20;
845             case RAW_DEPTH10:
846             case RAW10:
847                 return 10;
848             case RAW12:
849                 return 12;
850         }
851         return -1;
852     }
853 
    /**
     * Determine whether or not this is a public-visible {@code format}.
     *
     * <p>Most {@code @hide} formats will return {@code false}; note however
     * that {@link #RAW_DEPTH} and {@link #RAW_DEPTH10} are {@code @hide} yet
     * are deliberately treated as public by this check.</p>
     *
     * <p>Any other formats (including UNKNOWN) will return {@code false}.</p>
     *
     * @param format an integer format, one of the {@link Format} constants
     * @return {@code true} if the format is visible to applications,
     *         {@code false} otherwise
     *
     * @hide
     */
    public static boolean isPublicFormat(@Format int format) {
        switch (format) {
            case RGB_565:
            case NV16:
            case YUY2:
            case YV12:
            case JPEG:
            case NV21:
            case YUV_420_888:
            case YUV_422_888:
            case YUV_444_888:
            case YCBCR_P010:
            case FLEX_RGB_888:
            case FLEX_RGBA_8888:
            case RAW_SENSOR:
            case RAW_PRIVATE:
            case RAW10:
            case RAW12:
            case DEPTH16:
            case DEPTH_POINT_CLOUD:
            case PRIVATE:
            case RAW_DEPTH:
            case RAW_DEPTH10:
            case Y8:
            case DEPTH_JPEG:
            case HEIC:
                return true;
        }

        return false;
    }
897 }
898