• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.graphics;
18 
19 import android.annotation.FlaggedApi;
20 import android.annotation.IntDef;
21 
22 import com.android.internal.camera.flags.Flags;
23 
24 import java.lang.annotation.Retention;
25 import java.lang.annotation.RetentionPolicy;
26 
27 @android.ravenwood.annotation.RavenwoodKeepWholeClass
28 public class ImageFormat {
    /**
     * Annotation enumerating every format constant that may legally be passed
     * to (or returned from) APIs annotated with {@code @Format}.
     *
     * @hide
     */
     @Retention(RetentionPolicy.SOURCE)
     @IntDef(value = {
             UNKNOWN,
             /*
              * Since some APIs accept either ImageFormat or PixelFormat (and the two
              * enums do not overlap since they're both partial versions of the
              * internal format enum), add PixelFormat values here so linting
              * tools won't complain when method arguments annotated with
              * ImageFormat are provided with PixelFormat values.
              */
             PixelFormat.RGBA_8888,
             PixelFormat.RGBX_8888,
             PixelFormat.RGB_888,
             RGB_565,
             YV12,
             Y8,
             Y16,
             YCBCR_P010,
             YCBCR_P210,
             NV16,
             NV21,
             YUY2,
             JPEG,
             DEPTH_JPEG,
             YUV_420_888,
             YUV_422_888,
             YUV_444_888,
             FLEX_RGB_888,
             FLEX_RGBA_8888,
             RAW_SENSOR,
             RAW_PRIVATE,
             RAW10,
             RAW12,
             DEPTH16,
             DEPTH_POINT_CLOUD,
             RAW_DEPTH,
             RAW_DEPTH10,
             PRIVATE,
             HEIC,
             HEIC_ULTRAHDR,
             JPEG_R
     })
     public @interface Format {
     }
74 
    /*
     * these constants are chosen to be binary compatible with their previous
     * location in PixelFormat.java
     */

    /** Image format is unknown or not publicly specified. */
    public static final int UNKNOWN = 0;

    /**
     * RGB format used for pictures encoded as RGB_565. See
     * {@link android.hardware.Camera.Parameters#setPictureFormat(int)}.
     */
    public static final int RGB_565 = 4;
87 
    /**
     * <p>Android YUV format.</p>
     *
     * <p>This format is exposed to software decoders and applications.</p>
     *
     * <p>YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
     * by (W/2) x (H/2) Cr and Cb planes.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * <li>a vertical stride equal to the height</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height
     * c_stride = ALIGN(stride/2, 16)
     * c_size = c_stride * height/2
     * size = y_size + c_size * 2
     * cr_offset = y_size
     * cb_offset = y_size + c_size</pre>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>For the older camera API, this format is guaranteed to be supported for
     * {@link android.hardware.Camera} preview images since API level 12; for earlier API versions,
     * check {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     *
     * <p>Note that for camera preview callback use (see
     * {@link android.hardware.Camera#setPreviewCallback}), the
     * <var>stride</var> value is the smallest possible; that is, it is equal
     * to:</p>
     *
     * <pre>stride = ALIGN(width, 16)</pre>
     *
     * @see android.hardware.Camera.Parameters#setPreviewCallback
     * @see android.hardware.Camera.Parameters#setPreviewFormat
     * @see android.hardware.Camera.Parameters#getSupportedPreviewFormats
     */
    public static final int YV12 = 0x32315659;
132 
    /**
     * <p>Android Y8 format.</p>
     *
     * <p>Y8 is a YUV planar format comprised of a WxH Y plane only, with each pixel
     * being represented by 8 bits. It is equivalent to just the Y plane from {@link #YV12}
     * format.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int Y8 = 0x20203859;

    /**
     * <p>Android Y16 format.</p>
     *
     * <p>Y16 is a YUV planar format comprised of a WxH Y plane, with each pixel
     * being represented by 16 bits. It is just like {@link #Y8}, but has 16
     * bits per pixel (little endian).</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     *
     * @hide
     */
    public static final int Y16 = 0x20363159;
194 
    /**
     * <p>Android YUV P010 format.</p>
     *
     * <p>P010 is a 4:2:0 YCbCr semiplanar format comprised of a WxH Y plane
     * followed by a Wx(H/2) CbCr plane. Each sample is represented by a 16-bit
     * little-endian value, with the lower 6 bits set to zero.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int YCBCR_P010 = 0x36;

    /**
     * <p>Android YUV P210 format.</p>
     *
     * <p>P210 is a 4:2:2 YCbCr semiplanar format comprised of a WxH Y plane
     * followed by a WxH CbCr plane. Each sample is represented by a 16-bit
     * little-endian value, with the lower 6 bits set to zero.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    @FlaggedApi(android.media.codec.Flags.FLAG_P210_FORMAT_SUPPORT)
    public static final int YCBCR_P210 = 0x3c;
233 
    /**
     * YCbCr format, used for video.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>Whether this format is supported by the old camera API can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     */
    public static final int NV16 = 0x10;

    /**
     * YCrCb format used for images, which uses the NV21 encoding format.
     *
     * <p>This is the default format
     * for {@link android.hardware.Camera} preview images, when not otherwise set with
     * {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}.</p>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     */
    public static final int NV21 = 0x11;

    /**
     * YCbCr format used for images, which uses YUYV (YUY2) encoding format.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>This is an alternative format for {@link android.hardware.Camera} preview images. Whether
     * this format is supported by the camera hardware can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     */
    public static final int YUY2 = 0x14;
269 
    /**
     * Compressed JPEG format.
     *
     * <p>This format is always supported as an output format for the
     * {@link android.hardware.camera2} API, and as a picture format for the older
     * {@link android.hardware.Camera} API.</p>
     */
    public static final int JPEG = 0x100;

    /**
     * Depth augmented compressed JPEG format.
     *
     * <p>JPEG compressed main image along with XMP embedded depth metadata
     * following ISO 16684-1:2011(E).</p>
     */
    public static final int DEPTH_JPEG = 0x69656963;

    /**
     * Compressed JPEG format that includes an embedded recovery map.
     *
     * <p>JPEG compressed main image along with embedded recovery map following the
     * <a href="https://developer.android.com/guide/topics/media/hdr-image-format">Ultra HDR
     * Image format specification</a>.</p>
     */
    public static final int JPEG_R = 0x1005;
295 
    /**
     * <p>Multi-plane Android YUV 420 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:0
     * chroma-subsampled planar or semiplanar buffer (but not fully interleaved),
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>The Y-plane is guaranteed not to be interleaved with the U/V planes
     * (in particular, pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}).</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int YUV_420_888 = 0x23;

    /**
     * <p>Multi-plane Android YUV 422 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:2
     * chroma-subsampled (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_422_888 = 0x27;

    /**
     * <p>Multi-plane Android YUV 444 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:4:4
     * (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_444_888 = 0x28;
407 
    /**
     * <p>Multi-plane Android RGB format</p>
     *
     * <p>This format is a generic RGB format, capable of describing most RGB formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), and plane #2 is always B
     * (blue).</p>
     *
     * <p>All three planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int FLEX_RGB_888 = 0x29;

    /**
     * <p>Multi-plane Android RGBA format</p>
     *
     * <p>This format is a generic RGBA format, capable of describing most RGBA formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by four separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), plane #2 is always B (blue),
     * and plane #3 is always A (alpha). This format may represent pre-multiplied or
     * non-premultiplied alpha.</p>
     *
     * <p>All four planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int FLEX_RGBA_8888 = 0x2A;
462 
    /**
     * <p>General raw camera sensor image format, usually representing a
     * single-channel Bayer-mosaic image. Each pixel color sample is stored with
     * 16 bits of precision.</p>
     *
     * <p>The layout of the color mosaic, the maximum and minimum encoding
     * values of the raw pixel data, the color space of the image, and all other
     * needed information to interpret a raw sensor image must be queried from
     * the {@link android.hardware.camera2.CameraDevice} which produced the
     * image.</p>
     */
    public static final int RAW_SENSOR = 0x20;

    /**
     * <p>Private raw camera sensor image format, a single channel image with
     * implementation dependent pixel layout.</p>
     *
     * <p>RAW_PRIVATE is a format for unprocessed raw image buffers coming from an
     * image sensor. The actual structure of buffers of this format is
     * implementation-dependent.</p>
     */
    public static final int RAW_PRIVATE = 0x24;
486 
    /**
     * <p>
     * Android 10-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 10-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of
     * each row, each 4 consecutive pixels are packed into 5 bytes (40 bits).
     * Each one of the first 4 bytes contains the top 8 bits of each pixel, The
     * fifth byte contains the 2 least significant bits of the 4 pixels, the
     * exact layout data for each 4 consecutive pixels is illustrated below
     * ({@code Pi[j]} stands for the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[9]</td>
     * <td align="center">P0[8]</td>
     * <td align="center">P0[7]</td>
     * <td align="center">P0[6]</td>
     * <td align="center">P0[5]</td>
     * <td align="center">P0[4]</td>
     * <td align="center">P0[3]</td>
     * <td align="center">P0[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[9]</td>
     * <td align="center">P1[8]</td>
     * <td align="center">P1[7]</td>
     * <td align="center">P1[6]</td>
     * <td align="center">P1[5]</td>
     * <td align="center">P1[4]</td>
     * <td align="center">P1[3]</td>
     * <td align="center">P1[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P2[9]</td>
     * <td align="center">P2[8]</td>
     * <td align="center">P2[7]</td>
     * <td align="center">P2[6]</td>
     * <td align="center">P2[5]</td>
     * <td align="center">P2[4]</td>
     * <td align="center">P2[3]</td>
     * <td align="center">P2[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 3:</td>
     * <td align="center">P3[9]</td>
     * <td align="center">P3[8]</td>
     * <td align="center">P3[7]</td>
     * <td align="center">P3[6]</td>
     * <td align="center">P3[5]</td>
     * <td align="center">P3[4]</td>
     * <td align="center">P3[3]</td>
     * <td align="center">P3[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 4:</td>
     * <td align="center">P3[1]</td>
     * <td align="center">P3[0]</td>
     * <td align="center">P2[1]</td>
     * <td align="center">P2[0]</td>
     * <td align="center">P1[1]</td>
     * <td align="center">P1[0]</td>
     * <td align="center">P0[1]</td>
     * <td align="center">P0[0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (10 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (10 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW10 = 0x25;
610 
    /**
     * <p>
     * Android 12-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 12-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of each
     * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
     * and second byte contains the top 8 bits of first and second pixel. The third
     * byte contains the 4 least significant bits of the two pixels, the exact layout
     * data for each two consecutive pixels is illustrated below ({@code Pi[j]} stands for
     * the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[11]</td>
     * <td align="center">P0[10]</td>
     * <td align="center">P0[ 9]</td>
     * <td align="center">P0[ 8]</td>
     * <td align="center">P0[ 7]</td>
     * <td align="center">P0[ 6]</td>
     * <td align="center">P0[ 5]</td>
     * <td align="center">P0[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[11]</td>
     * <td align="center">P1[10]</td>
     * <td align="center">P1[ 9]</td>
     * <td align="center">P1[ 8]</td>
     * <td align="center">P1[ 7]</td>
     * <td align="center">P1[ 6]</td>
     * <td align="center">P1[ 5]</td>
     * <td align="center">P1[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P1[ 3]</td>
     * <td align="center">P1[ 2]</td>
     * <td align="center">P1[ 1]</td>
     * <td align="center">P1[ 0]</td>
     * <td align="center">P0[ 3]</td>
     * <td align="center">P0[ 2]</td>
     * <td align="center">P0[ 1]</td>
     * <td align="center">P0[ 0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (12 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (12 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW12 = 0x26;
712 
    /**
     * <p>Android dense depth image format.</p>
     *
     * <p>Each pixel is 16 bits, representing a depth ranging measurement from a depth camera or
     * similar sensor. The 16-bit sample consists of a confidence value and the actual ranging
     * measurement.</p>
     *
     * <p>The confidence value is an estimate of correctness for this sample.  It is encoded in the
     * 3 most significant bits of the sample, with a value of 0 representing 100% confidence, a
     * value of 1 representing 0% confidence, a value of 2 representing 1/7, a value of 3
     * representing 2/7, and so on.</p>
     *
     * <p>As an example, the following sample extracts the range and confidence from the first pixel
     * of a DEPTH16-format {@link android.media.Image}, and converts the confidence to a
     * floating-point value between 0 and 1.f inclusive, with 1.f representing maximum confidence:
     *
     * <pre>
     *    ShortBuffer shortDepthBuffer = img.getPlanes()[0].getBuffer().asShortBuffer();
     *    short depthSample = shortDepthBuffer.get();
     *    short depthRange = (short) (depthSample & 0x1FFF);
     *    short depthConfidence = (short) ((depthSample >> 13) & 0x7);
     *    float depthPercentage = depthConfidence == 0 ? 1.f : (depthConfidence - 1) / 7.f;
     * </pre>
     * </p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * When produced by a camera, the units for the range are millimeters.
     */
    public static final int DEPTH16 = 0x44363159;
751 
752     /**
753      * Android sparse depth point cloud format.
754      *
755      * <p>A variable-length list of 3D points plus a confidence value, with each point represented
756      * by four floats; first the X, Y, Z position coordinates, and then the confidence value.</p>
757      *
758      * <p>The number of points is {@code (size of the buffer in bytes) / 16}.
759      *
760      * <p>The coordinate system and units of the position values depend on the source of the point
761      * cloud data. The confidence value is between 0.f and 1.f, inclusive, with 0 representing 0%
762      * confidence and 1.f representing 100% confidence in the measured position values.</p>
763      *
764      * <p>As an example, the following code extracts the first depth point in a DEPTH_POINT_CLOUD
765      * format {@link android.media.Image}:
766      * <pre>
767      *    FloatBuffer floatDepthBuffer = img.getPlanes()[0].getBuffer().asFloatBuffer();
768      *    float x = floatDepthBuffer.get();
769      *    float y = floatDepthBuffer.get();
770      *    float z = floatDepthBuffer.get();
771      *    float confidence = floatDepthBuffer.get();
772      * </pre>
773      *
774      * For camera devices that support the
775      * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT DEPTH_OUTPUT}
776      * capability, DEPTH_POINT_CLOUD coordinates have units of meters, and the coordinate system is
777      * defined by the camera's pose transforms:
778      * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_TRANSLATION} and
779      * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_ROTATION}. That means the origin is
780      * the optical center of the camera device, and the positive Z axis points along the camera's optical axis,
781      * toward the scene.
782      */
783     public static final int DEPTH_POINT_CLOUD = 0x101;
784 
785     /**
786      * Unprocessed implementation-dependent raw
787      * depth measurements, opaque with 16 bit
788      * samples.
789      *
790      * @hide
791      */
792     public static final int RAW_DEPTH = 0x1002;
793 
794     /**
795      * Unprocessed implementation-dependent raw
796      * depth measurements, opaque with 10 bit
797      * samples and device specific bit layout.
798      *
799      * @hide
800      */
801     public static final int RAW_DEPTH10 = 0x1003;
802 
803     /**
804      * Android private opaque image format.
805      * <p>
806      * The choices of the actual format and pixel data layout are entirely up to
807      * the device-specific and framework internal implementations, and may vary
808      * depending on use cases even for the same device. The buffers of this
809      * format can be produced by components like
810      * {@link android.media.ImageWriter ImageWriter} , and interpreted correctly
811      * by consumers like {@link android.hardware.camera2.CameraDevice
812      * CameraDevice} based on the device/framework private information. However,
813      * these buffers are not directly accessible to the application.
814      * </p>
815      * <p>
816      * When an {@link android.media.Image Image} of this format is obtained from
817      * an {@link android.media.ImageReader ImageReader} or
818      * {@link android.media.ImageWriter ImageWriter}, the
819      * {@link android.media.Image#getPlanes() getPlanes()} method will return an
820      * empty {@link android.media.Image.Plane Plane} array.
821      * </p>
822      * <p>
823      * If a buffer of this format is to be used as an OpenGL ES texture, the
824      * framework will assume that sampling the texture will always return an
825      * alpha value of 1.0 (i.e. the buffer contains only opaque pixel values).
826      * </p>
827      */
828     public static final int PRIVATE = 0x22;
829 
830     /**
831      * Compressed HEIC format.
832      *
833      * <p>This format defines the HEIC brand of High Efficiency Image File
834      * Format as described in ISO/IEC 23008-12.</p>
835      */
836     public static final int HEIC = 0x48454946;
837 
838     /**
839      * High Efficiency Image File Format (HEIF) with embedded HDR gain map
840      *
841      * <p>This format defines the HEIC brand of High Efficiency Image File
842      * Format as described in ISO/IEC 23008-12:2024 with HDR gain map according
843      * to ISO/CD 21496‐1.</p>
844      */
845     @FlaggedApi(Flags.FLAG_CAMERA_HEIF_GAINMAP)
846     public static final int HEIC_ULTRAHDR = 0x1006;
847 
848     /**
849      * Use this function to retrieve the number of bits per pixel of an
850      * ImageFormat.
851      *
852      * @param format
853      * @return the number of bits per pixel of the given format or -1 if the
854      *         format doesn't exist or is not supported.
855      */
getBitsPerPixel(@ormat int format)856     public static int getBitsPerPixel(@Format int format) {
857         switch (format) {
858             case RGB_565:
859                 return 16;
860             case NV16:
861                 return 16;
862             case YUY2:
863                 return 16;
864             case YV12:
865                 return 12;
866             case Y8:
867                 return 8;
868             case Y16:
869             case DEPTH16:
870                 return 16;
871             case NV21:
872                 return 12;
873             case YUV_420_888:
874                 return 12;
875             case YUV_422_888:
876                 return 16;
877             case YUV_444_888:
878                 return 24;
879             case FLEX_RGB_888:
880                 return 24;
881             case FLEX_RGBA_8888:
882                 return 32;
883             case RAW_DEPTH:
884             case RAW_SENSOR:
885                 return 16;
886             case YCBCR_P010:
887                 return 24;
888             case YCBCR_P210:
889                 return 32;
890             case RAW_DEPTH10:
891             case RAW10:
892                 return 10;
893             case RAW12:
894                 return 12;
895         }
896         return -1;
897     }
898 
899     /**
900      * Determine whether or not this is a public-visible {@code format}.
901      *
902      * <p>In particular, {@code @hide} formats will return {@code false}.</p>
903      *
904      * <p>Any other formats (including UNKNOWN) will return {@code false}.</p>
905      *
906      * @param format an integer format
907      * @return a boolean
908      *
909      * @hide
910      */
isPublicFormat(@ormat int format)911     public static boolean isPublicFormat(@Format int format) {
912         switch (format) {
913             case RGB_565:
914             case NV16:
915             case YUY2:
916             case YV12:
917             case JPEG:
918             case NV21:
919             case YUV_420_888:
920             case YUV_422_888:
921             case YUV_444_888:
922             case YCBCR_P010:
923             case FLEX_RGB_888:
924             case FLEX_RGBA_8888:
925             case RAW_SENSOR:
926             case RAW_PRIVATE:
927             case RAW10:
928             case RAW12:
929             case DEPTH16:
930             case DEPTH_POINT_CLOUD:
931             case PRIVATE:
932             case RAW_DEPTH:
933             case RAW_DEPTH10:
934             case Y8:
935             case DEPTH_JPEG:
936             case HEIC:
937             case JPEG_R:
938                 return true;
939         }
940         if (android.media.codec.Flags.p210FormatSupport() && format == YCBCR_P210) {
941             return true;
942         }
943         if (Flags.cameraHeifGainmap()){
944             if (format == HEIC_ULTRAHDR) {
945                 return true;
946             }
947         }
948         return false;
949     }
950 }
951