1 /* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 package android.hardware.camera2.params; 18 19 import android.graphics.ImageFormat; 20 import android.graphics.PixelFormat; 21 import android.hardware.camera2.CameraCharacteristics; 22 import android.hardware.camera2.CameraDevice; 23 import android.hardware.camera2.CameraMetadata; 24 import android.hardware.camera2.CaptureRequest; 25 import android.hardware.camera2.utils.HashCodeHelpers; 26 import android.hardware.camera2.utils.SurfaceUtils; 27 import android.hardware.camera2.legacy.LegacyCameraDevice; 28 import android.hardware.camera2.legacy.LegacyMetadataMapper; 29 import android.view.Surface; 30 import android.util.Range; 31 import android.util.Size; 32 import android.util.SparseIntArray; 33 34 import java.util.Arrays; 35 import java.util.HashMap; 36 import java.util.Objects; 37 import java.util.Set; 38 39 import static com.android.internal.util.Preconditions.*; 40 41 /** 42 * Immutable class to store the available stream 43 * {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP configurations} to set up 44 * {@link android.view.Surface Surfaces} for creating a 45 * {@link android.hardware.camera2.CameraCaptureSession capture session} with 46 * {@link android.hardware.camera2.CameraDevice#createCaptureSession}. 
 * <!-- TODO: link to input stream configuration -->
 *
 * <p>This is the authoritative list for all <!-- input/ -->output formats (and sizes respectively
 * for that format) that are supported by a camera device.</p>
 *
 * <p>This also contains the minimum frame durations and stall durations for each format/size
 * combination that can be used to calculate effective frame rate when submitting multiple captures.
 * </p>
 *
 * <p>An instance of this object is available from {@link CameraCharacteristics} using
 * the {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP} key and the
 * {@link CameraCharacteristics#get} method.</p>
 *
 * <pre><code>{@code
 * CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId);
 * StreamConfigurationMap configs = characteristics.get(
 *         CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
 * }</code></pre>
 *
 * @see CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP
 * @see CameraDevice#createCaptureSession
 */
public final class StreamConfigurationMap {

    private static final String TAG = "StreamConfigurationMap";

    /**
     * Create a new {@link StreamConfigurationMap}.
     *
     * <p>The array parameters ownership is passed to this object after creation; do not
     * write to them after this constructor is invoked.</p>
     *
     * @param configurations a non-{@code null} array of {@link StreamConfiguration}
     * @param minFrameDurations a non-{@code null} array of {@link StreamConfigurationDuration}
     * @param stallDurations a non-{@code null} array of {@link StreamConfigurationDuration}
     * @param depthConfigurations an array of depth {@link StreamConfiguration}; may only be
     *        {@code null} when {@code configurations} is non-{@code null}
     * @param depthMinFrameDurations an array of depth {@link StreamConfigurationDuration}
     * @param depthStallDurations an array of depth {@link StreamConfigurationDuration}
     * @param highSpeedVideoConfigurations an array of {@link HighSpeedVideoConfiguration}, null if
     *        camera device does not support high speed video recording
     * @param inputOutputFormatsMap the input-to-output format map used for reprocessing
     *        (may be {@code null}; not validated here)
     * @param listHighResolution a flag indicating whether the device supports BURST_CAPTURE
     *        and thus needs a separate list of slow high-resolution output sizes
     * @throws NullPointerException if any of the arguments except highSpeedVideoConfigurations
     *         were {@code null} or any subelements were {@code null}
     * @throws AssertionError if color configurations are present but none of them is an
     *         IMPLEMENTATION_DEFINED output
     *
     * @hide
     */
    public StreamConfigurationMap(
            StreamConfiguration[] configurations,
            StreamConfigurationDuration[] minFrameDurations,
            StreamConfigurationDuration[] stallDurations,
            StreamConfiguration[] depthConfigurations,
            StreamConfigurationDuration[] depthMinFrameDurations,
            StreamConfigurationDuration[] depthStallDurations,
            HighSpeedVideoConfiguration[] highSpeedVideoConfigurations,
            ReprocessFormatsMap inputOutputFormatsMap,
            boolean listHighResolution) {

        if (configurations == null) {
            // If no color configurations exist, ensure depth ones do
            // (a device must expose at least one of the two lists).
            checkArrayElementsNotNull(depthConfigurations, "depthConfigurations");
            mConfigurations = new StreamConfiguration[0];
            mMinFrameDurations = new StreamConfigurationDuration[0];
            mStallDurations = new StreamConfigurationDuration[0];
        } else {
            mConfigurations = checkArrayElementsNotNull(configurations, "configurations");
            mMinFrameDurations = checkArrayElementsNotNull(minFrameDurations, "minFrameDurations");
            mStallDurations = checkArrayElementsNotNull(stallDurations, "stallDurations");
        }

        mListHighResolution = listHighResolution;

        if (depthConfigurations == null) {
            mDepthConfigurations = new StreamConfiguration[0];
            mDepthMinFrameDurations = new StreamConfigurationDuration[0];
            mDepthStallDurations = new StreamConfigurationDuration[0];
        } else {
            mDepthConfigurations = checkArrayElementsNotNull(depthConfigurations,
                    "depthConfigurations");
            mDepthMinFrameDurations = checkArrayElementsNotNull(depthMinFrameDurations,
                    "depthMinFrameDurations");
            mDepthStallDurations = checkArrayElementsNotNull(depthStallDurations,
                    "depthStallDurations");
        }

        if (highSpeedVideoConfigurations == null) {
            mHighSpeedVideoConfigurations = new HighSpeedVideoConfiguration[0];
        } else {
            mHighSpeedVideoConfigurations = checkArrayElementsNotNull(
                    highSpeedVideoConfigurations, "highSpeedVideoConfigurations");
        }

        // For each format, track how many sizes there are available to configure
        for (StreamConfiguration config : mConfigurations) {
            int fmt = config.getFormat();
            SparseIntArray map = null;
            if (config.isOutput()) {
                mAllOutputFormats.put(fmt, mAllOutputFormats.get(fmt) + 1);
                long duration = 0;
                if (mListHighResolution) {
                    // Look up this configuration's minimum frame duration by (format, size);
                    // when the device does not list high resolutions separately, duration
                    // stays 0 and the configuration lands in the regular output map below.
                    for (StreamConfigurationDuration configurationDuration : mMinFrameDurations) {
                        if (configurationDuration.getFormat() == fmt &&
                                configurationDuration.getWidth() == config.getSize().getWidth() &&
                                configurationDuration.getHeight() == config.getSize().getHeight()) {
                            duration = configurationDuration.getDuration();
                            break;
                        }
                    }
                }
                // Outputs that cannot reach 20fps are tracked in the high-resolution map.
                map = duration <= DURATION_20FPS_NS ?
                        mOutputFormats : mHighResOutputFormats;
            } else {
                map = mInputFormats;
            }
            map.put(fmt, map.get(fmt) + 1);
        }

        // For each depth format, track how many sizes there are available to configure
        for (StreamConfiguration config : mDepthConfigurations) {
            if (!config.isOutput()) {
                // Ignoring input depth configs
                continue;
            }

            mDepthOutputFormats.put(config.getFormat(),
                    mDepthOutputFormats.get(config.getFormat()) + 1);
        }

        if (configurations != null &&
                mOutputFormats.indexOfKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) < 0) {
            throw new AssertionError(
                    "At least one stream configuration for IMPLEMENTATION_DEFINED must exist");
        }

        // For each Size/FPS range, track how many FPS range/Size there are available
        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
            Size size = config.getSize();
            Range<Integer> fpsRange = config.getFpsRange();
            Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
            if (fpsRangeCount == null) {
                fpsRangeCount = 0;
            }
            mHighSpeedVideoSizeMap.put(size, fpsRangeCount + 1);
            Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
            if (sizeCount == null) {
                sizeCount = 0;
            }
            mHighSpeedVideoFpsRangeMap.put(fpsRange, sizeCount + 1);
        }

        mInputOutputFormatsMap = inputOutputFormatsMap;
    }

    /**
     * Get the image {@code format} output formats in this stream configuration.
199 * 200 * <p>All image formats returned by this function will be defined in either {@link ImageFormat} 201 * or in {@link PixelFormat} (and there is no possibility of collision).</p> 202 * 203 * <p>Formats listed in this array are guaranteed to return true if queried with 204 * {@link #isOutputSupportedFor(int)}.</p> 205 * 206 * @return an array of integer format 207 * 208 * @see ImageFormat 209 * @see PixelFormat 210 */ getOutputFormats()211 public final int[] getOutputFormats() { 212 return getPublicFormats(/*output*/true); 213 } 214 215 /** 216 * Get the image {@code format} output formats for a reprocessing input format. 217 * 218 * <p>When submitting a {@link CaptureRequest} with an input Surface of a given format, 219 * the only allowed target outputs of the {@link CaptureRequest} are the ones with a format 220 * listed in the return value of this method. Including any other output Surface as a target 221 * will throw an IllegalArgumentException. If no output format is supported given the input 222 * format, an empty int[] will be returned.</p> 223 * 224 * <p>All image formats returned by this function will be defined in either {@link ImageFormat} 225 * or in {@link PixelFormat} (and there is no possibility of collision).</p> 226 * 227 * <p>Formats listed in this array are guaranteed to return true if queried with 228 * {@link #isOutputSupportedFor(int)}.</p> 229 * 230 * @return an array of integer format 231 * 232 * @see ImageFormat 233 * @see PixelFormat 234 */ getValidOutputFormatsForInput(int inputFormat)235 public final int[] getValidOutputFormatsForInput(int inputFormat) { 236 if (mInputOutputFormatsMap == null) { 237 return new int[0]; 238 } 239 return mInputOutputFormatsMap.getOutputs(inputFormat); 240 } 241 242 /** 243 * Get the image {@code format} input formats in this stream configuration. 
     *
     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
     *
     * @return an array of integer format
     *
     * @see ImageFormat
     * @see PixelFormat
     */
    public final int[] getInputFormats() {
        return getPublicFormats(/*output*/false);
    }

    /**
     * Get the supported input sizes for this input format.
     *
     * <p>The format must have come from {@link #getInputFormats}; otherwise
     * {@code null} is returned.</p>
     *
     * @param format a format from {@link #getInputFormats}
     * @return a non-empty array of sizes, or {@code null} if the format was not available.
     */
    public Size[] getInputSizes(final int format) {
        return getPublicFormatSizes(format, /*output*/false, /*highRes*/false);
    }

    /**
     * Determine whether or not output surfaces with a particular user-defined format can be passed
     * {@link CameraDevice#createCaptureSession createCaptureSession}.
     *
     * <p>This method determines that the output {@code format} is supported by the camera device;
     * each output {@code surface} target may or may not itself support that {@code format}.
276 * Refer to the class which provides the surface for additional documentation.</p> 277 * 278 * <p>Formats for which this returns {@code true} are guaranteed to exist in the result 279 * returned by {@link #getOutputSizes}.</p> 280 * 281 * @param format an image format from either {@link ImageFormat} or {@link PixelFormat} 282 * @return 283 * {@code true} iff using a {@code surface} with this {@code format} will be 284 * supported with {@link CameraDevice#createCaptureSession} 285 * 286 * @throws IllegalArgumentException 287 * if the image format was not a defined named constant 288 * from either {@link ImageFormat} or {@link PixelFormat} 289 * 290 * @see ImageFormat 291 * @see PixelFormat 292 * @see CameraDevice#createCaptureSession 293 */ isOutputSupportedFor(int format)294 public boolean isOutputSupportedFor(int format) { 295 checkArgumentFormat(format); 296 297 int internalFormat = imageFormatToInternal(format); 298 int dataspace = imageFormatToDataspace(format); 299 if (dataspace == HAL_DATASPACE_DEPTH) { 300 return mDepthOutputFormats.indexOfKey(internalFormat) >= 0; 301 } else { 302 return getFormatsMap(/*output*/true).indexOfKey(internalFormat) >= 0; 303 } 304 } 305 306 /** 307 * Determine whether or not output streams can be configured with a particular class 308 * as a consumer. 
309 * 310 * <p>The following list is generally usable for outputs: 311 * <ul> 312 * <li>{@link android.media.ImageReader} - 313 * Recommended for image processing or streaming to external resources (such as a file or 314 * network) 315 * <li>{@link android.media.MediaRecorder} - 316 * Recommended for recording video (simple to use) 317 * <li>{@link android.media.MediaCodec} - 318 * Recommended for recording video (more complicated to use, with more flexibility) 319 * <li>{@link android.renderscript.Allocation} - 320 * Recommended for image processing with {@link android.renderscript RenderScript} 321 * <li>{@link android.view.SurfaceHolder} - 322 * Recommended for low-power camera preview with {@link android.view.SurfaceView} 323 * <li>{@link android.graphics.SurfaceTexture} - 324 * Recommended for OpenGL-accelerated preview processing or compositing with 325 * {@link android.view.TextureView} 326 * </ul> 327 * </p> 328 * 329 * <p>Generally speaking this means that creating a {@link Surface} from that class <i>may</i> 330 * provide a producer endpoint that is suitable to be used with 331 * {@link CameraDevice#createCaptureSession}.</p> 332 * 333 * <p>Since not all of the above classes support output of all format and size combinations, 334 * the particular combination should be queried with {@link #isOutputSupportedFor(Surface)}.</p> 335 * 336 * @param klass a non-{@code null} {@link Class} object reference 337 * @return {@code true} if this class is supported as an output, {@code false} otherwise 338 * 339 * @throws NullPointerException if {@code klass} was {@code null} 340 * 341 * @see CameraDevice#createCaptureSession 342 * @see #isOutputSupportedFor(Surface) 343 */ isOutputSupportedFor(Class<T> klass)344 public static <T> boolean isOutputSupportedFor(Class<T> klass) { 345 checkNotNull(klass, "klass must not be null"); 346 347 if (klass == android.media.ImageReader.class) { 348 return true; 349 } else if (klass == android.media.MediaRecorder.class) { 350 return 
true; 351 } else if (klass == android.media.MediaCodec.class) { 352 return true; 353 } else if (klass == android.renderscript.Allocation.class) { 354 return true; 355 } else if (klass == android.view.SurfaceHolder.class) { 356 return true; 357 } else if (klass == android.graphics.SurfaceTexture.class) { 358 return true; 359 } 360 361 return false; 362 } 363 364 /** 365 * Determine whether or not the {@code surface} in its current state is suitable to be included 366 * in a {@link CameraDevice#createCaptureSession capture session} as an output. 367 * 368 * <p>Not all surfaces are usable with the {@link CameraDevice}, and not all configurations 369 * of that {@code surface} are compatible. Some classes that provide the {@code surface} are 370 * compatible with the {@link CameraDevice} in general 371 * (see {@link #isOutputSupportedFor(Class)}, but it is the caller's responsibility to put the 372 * {@code surface} into a state that will be compatible with the {@link CameraDevice}.</p> 373 * 374 * <p>Reasons for a {@code surface} being specifically incompatible might be: 375 * <ul> 376 * <li>Using a format that's not listed by {@link #getOutputFormats} 377 * <li>Using a format/size combination that's not listed by {@link #getOutputSizes} 378 * <li>The {@code surface} itself is not in a state where it can service a new producer.</p> 379 * </li> 380 * </ul> 381 * 382 * <p>Surfaces from flexible sources will return true even if the exact size of the Surface does 383 * not match a camera-supported size, as long as the format (or class) is supported and the 384 * camera device supports a size that is equal to or less than 1080p in that format. If such as 385 * Surface is used to create a capture session, it will have its size rounded to the nearest 386 * supported size, below or equal to 1080p. 
Flexible sources include SurfaceView, SurfaceTexture, 387 * and ImageReader.</p> 388 * 389 * <p>This is not an exhaustive list; see the particular class's documentation for further 390 * possible reasons of incompatibility.</p> 391 * 392 * @param surface a non-{@code null} {@link Surface} object reference 393 * @return {@code true} if this is supported, {@code false} otherwise 394 * 395 * @throws NullPointerException if {@code surface} was {@code null} 396 * @throws IllegalArgumentException if the Surface endpoint is no longer valid 397 * 398 * @see CameraDevice#createCaptureSession 399 * @see #isOutputSupportedFor(Class) 400 */ isOutputSupportedFor(Surface surface)401 public boolean isOutputSupportedFor(Surface surface) { 402 checkNotNull(surface, "surface must not be null"); 403 404 Size surfaceSize = SurfaceUtils.getSurfaceSize(surface); 405 int surfaceFormat = SurfaceUtils.getSurfaceFormat(surface); 406 int surfaceDataspace = SurfaceUtils.getSurfaceDataspace(surface); 407 408 // See if consumer is flexible. 409 boolean isFlexible = SurfaceUtils.isFlexibleConsumer(surface); 410 411 StreamConfiguration[] configs = 412 surfaceDataspace != HAL_DATASPACE_DEPTH ? mConfigurations : mDepthConfigurations; 413 for (StreamConfiguration config : configs) { 414 if (config.getFormat() == surfaceFormat && config.isOutput()) { 415 // Matching format, either need exact size match, or a flexible consumer 416 // and a size no bigger than MAX_DIMEN_FOR_ROUNDING 417 if (config.getSize().equals(surfaceSize)) { 418 return true; 419 } else if (isFlexible && 420 (config.getSize().getWidth() <= LegacyCameraDevice.MAX_DIMEN_FOR_ROUNDING)) { 421 return true; 422 } 423 } 424 } 425 return false; 426 } 427 428 /** 429 * Get a list of sizes compatible with {@code klass} to use as an output. 430 * 431 * <p>Some of the supported classes may support additional formats beyond 432 * {@link ImageFormat#PRIVATE}; this function only returns 433 * sizes for {@link ImageFormat#PRIVATE}. 
For example, {@link android.media.ImageReader} 434 * supports {@link ImageFormat#YUV_420_888} and {@link ImageFormat#PRIVATE}, this method will 435 * only return the sizes for {@link ImageFormat#PRIVATE} for {@link android.media.ImageReader} 436 * class.</p> 437 * 438 * <p>If a well-defined format such as {@code NV21} is required, use 439 * {@link #getOutputSizes(int)} instead.</p> 440 * 441 * <p>The {@code klass} should be a supported output, that querying 442 * {@code #isOutputSupportedFor(Class)} should return {@code true}.</p> 443 * 444 * @param klass 445 * a non-{@code null} {@link Class} object reference 446 * @return 447 * an array of supported sizes for {@link ImageFormat#PRIVATE} format, 448 * or {@code null} iff the {@code klass} is not a supported output. 449 * 450 * 451 * @throws NullPointerException if {@code klass} was {@code null} 452 * 453 * @see #isOutputSupportedFor(Class) 454 */ getOutputSizes(Class<T> klass)455 public <T> Size[] getOutputSizes(Class<T> klass) { 456 if (isOutputSupportedFor(klass) == false) { 457 return null; 458 } 459 460 return getInternalFormatSizes(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 461 HAL_DATASPACE_UNKNOWN,/*output*/true, /*highRes*/false); 462 } 463 464 /** 465 * Get a list of sizes compatible with the requested image {@code format}. 466 * 467 * <p>The {@code format} should be a supported format (one of the formats returned by 468 * {@link #getOutputFormats}).</p> 469 * 470 * As of API level 23, the {@link #getHighResolutionOutputSizes} method can be used on devices 471 * that support the 472 * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE} 473 * capability to get a list of high-resolution output sizes that cannot operate at the preferred 474 * 20fps rate. This means that for some supported formats, this method will return an empty 475 * list, if all the supported resolutions operate at below 20fps. 
     * For devices that do not
     * support the BURST_CAPTURE capability, all output resolutions are listed through this method.
     *
     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
     * @return
     *          an array of supported sizes,
     *          or {@code null} if the {@code format} is not a supported output
     *
     * @see ImageFormat
     * @see PixelFormat
     * @see #getOutputFormats
     */
    public Size[] getOutputSizes(int format) {
        return getPublicFormatSizes(format, /*output*/true, /*highRes*/ false);
    }

    /**
     * Get a list of supported high speed video recording sizes.
     * <p>
     * When {@link CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO} is
     * supported in {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES}, this method will
     * list the supported high speed video size configurations. All the sizes listed will be a
     * subset of the sizes reported by {@link #getOutputSizes} for processed non-stalling formats
     * (typically {@link ImageFormat#PRIVATE} {@link ImageFormat#YUV_420_888}, etc.)
     * </p>
     * <p>
     * To enable high speed video recording, application must create a constrained high speed
     * capture session via {@link CameraDevice#createConstrainedHighSpeedCaptureSession}, and submit
     * a CaptureRequest list created by
     * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}
     * to this session. The application must select the video size from this method and
     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
     * {@link #getHighSpeedVideoFpsRangesFor} to configure the constrained high speed session and
     * generate the high speed request list. For example, if the application intends to do high
     * speed recording, it can select the maximum size reported by this method to create high speed
     * capture session.
Note that for the use case of multiple output streams, application must 511 * select one unique size from this method to use (e.g., preview and recording streams must have 512 * the same size). Otherwise, the high speed session creation will fail. Once the size is 513 * selected, application can get the supported FPS ranges by 514 * {@link #getHighSpeedVideoFpsRangesFor}, and use these FPS ranges to setup the recording 515 * request lists via 516 * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}. 517 * </p> 518 * 519 * @return an array of supported high speed video recording sizes 520 * @see #getHighSpeedVideoFpsRangesFor(Size) 521 * @see CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO 522 * @see CameraDevice#createConstrainedHighSpeedCaptureSession 523 * @see android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList 524 */ getHighSpeedVideoSizes()525 public Size[] getHighSpeedVideoSizes() { 526 Set<Size> keySet = mHighSpeedVideoSizeMap.keySet(); 527 return keySet.toArray(new Size[keySet.size()]); 528 } 529 530 /** 531 * Get the frame per second ranges (fpsMin, fpsMax) for input high speed video size. 532 * <p> 533 * See {@link #getHighSpeedVideoFpsRanges} for how to enable high speed recording. 534 * </p> 535 * <p> 536 * The {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in this method 537 * must not be used to setup capture requests that are submitted to unconstrained capture 538 * sessions, or it will result in {@link IllegalArgumentException IllegalArgumentExceptions}. 539 * </p> 540 * <p> 541 * See {@link #getHighSpeedVideoFpsRanges} for the characteristics of the returned FPS ranges. 
542 * </p> 543 * 544 * @param size one of the sizes returned by {@link #getHighSpeedVideoSizes()} 545 * @return an array of supported high speed video recording FPS ranges The upper bound of 546 * returned ranges is guaranteed to be greater than or equal to 120. 547 * @throws IllegalArgumentException if input size does not exist in the return value of 548 * getHighSpeedVideoSizes 549 * @see #getHighSpeedVideoSizes() 550 * @see #getHighSpeedVideoFpsRanges() 551 */ getHighSpeedVideoFpsRangesFor(Size size)552 public Range<Integer>[] getHighSpeedVideoFpsRangesFor(Size size) { 553 Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size); 554 if (fpsRangeCount == null || fpsRangeCount == 0) { 555 throw new IllegalArgumentException(String.format( 556 "Size %s does not support high speed video recording", size)); 557 } 558 559 @SuppressWarnings("unchecked") 560 Range<Integer>[] fpsRanges = new Range[fpsRangeCount]; 561 int i = 0; 562 for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) { 563 if (size.equals(config.getSize())) { 564 fpsRanges[i++] = config.getFpsRange(); 565 } 566 } 567 return fpsRanges; 568 } 569 570 /** 571 * Get a list of supported high speed video recording FPS ranges. 572 * <p> 573 * When {@link CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO} is 574 * supported in {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES}, this method will 575 * list the supported high speed video FPS range configurations. Application can then use 576 * {@link #getHighSpeedVideoSizesFor} to query available sizes for one of returned FPS range. 
     * </p>
     * <p>
     * To enable high speed video recording, application must create a constrained high speed
     * capture session via {@link CameraDevice#createConstrainedHighSpeedCaptureSession}, and submit
     * a CaptureRequest list created by
     * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}
     * to this session. The application must select the video size from this method and
     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
     * {@link #getHighSpeedVideoFpsRangesFor} to configure the constrained high speed session and
     * generate the high speed request list. For example, if the application intends to do high
     * speed recording, it can select one FPS range reported by this method, query the video sizes
     * corresponding to this FPS range by {@link #getHighSpeedVideoSizesFor} and use one of reported
     * sizes to create a high speed capture session. Note that for the use case of multiple output
     * streams, application must select one unique size from this method to use (e.g., preview and
     * recording streams must have the same size). Otherwise, the high speed session creation will
     * fail. Once the high speed capture session is created, the application can set the FPS range
     * in the recording request lists via
     * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}.
     * </p>
     * <p>
     * The FPS ranges reported by this method will have below characteristics:
     * <li>The fpsMin and fpsMax will be a multiple of 30fps.</li>
     * <li>The fpsMin will be no less than 30fps, the fpsMax will be no less than 120fps.</li>
     * <li>At least one range will be a fixed FPS range where fpsMin == fpsMax.</li>
     * <li>For each fixed FPS range, there will be one corresponding variable FPS range [30,
     * fps_max].
These kinds of FPS ranges are suitable for preview-only use cases where the 603 * application doesn't want the camera device always produce higher frame rate than the display 604 * refresh rate.</li> 605 * </p> 606 * 607 * @return an array of supported high speed video recording FPS ranges The upper bound of 608 * returned ranges is guaranteed to be larger or equal to 120. 609 * @see #getHighSpeedVideoSizesFor 610 * @see CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO 611 * @see CameraDevice#createConstrainedHighSpeedCaptureSession 612 * @see CameraDevice#createHighSpeedRequestList 613 */ 614 @SuppressWarnings("unchecked") getHighSpeedVideoFpsRanges()615 public Range<Integer>[] getHighSpeedVideoFpsRanges() { 616 Set<Range<Integer>> keySet = mHighSpeedVideoFpsRangeMap.keySet(); 617 return keySet.toArray(new Range[keySet.size()]); 618 } 619 620 /** 621 * Get the supported video sizes for an input high speed FPS range. 622 * 623 * <p> See {@link #getHighSpeedVideoSizes} for how to enable high speed recording.</p> 624 * 625 * @param fpsRange one of the FPS range returned by {@link #getHighSpeedVideoFpsRanges()} 626 * @return An array of video sizes to create high speed capture sessions for high speed streaming 627 * use cases. 
628 * 629 * @throws IllegalArgumentException if input FPS range does not exist in the return value of 630 * getHighSpeedVideoFpsRanges 631 * @see #getHighSpeedVideoFpsRanges() 632 */ getHighSpeedVideoSizesFor(Range<Integer> fpsRange)633 public Size[] getHighSpeedVideoSizesFor(Range<Integer> fpsRange) { 634 Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange); 635 if (sizeCount == null || sizeCount == 0) { 636 throw new IllegalArgumentException(String.format( 637 "FpsRange %s does not support high speed video recording", fpsRange)); 638 } 639 640 Size[] sizes = new Size[sizeCount]; 641 int i = 0; 642 for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) { 643 if (fpsRange.equals(config.getFpsRange())) { 644 sizes[i++] = config.getSize(); 645 } 646 } 647 return sizes; 648 } 649 650 /** 651 * Get a list of supported high resolution sizes, which cannot operate at full BURST_CAPTURE 652 * rate. 653 * 654 * <p>This includes all output sizes that cannot meet the 20 fps frame rate requirements for the 655 * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE} 656 * capability. This does not include the stall duration, so for example, a JPEG or RAW16 output 657 * resolution with a large stall duration but a minimum frame duration that's above 20 fps will 658 * still be listed in the regular {@link #getOutputSizes} list. All the sizes on this list are 659 * still guaranteed to operate at a rate of at least 10 fps, not including stall duration.</p> 660 * 661 * <p>For a device that does not support the BURST_CAPTURE capability, this list will be 662 * {@code null}, since resolutions in the {@link #getOutputSizes} list are already not 663 * guaranteed to meet >= 20 fps rate requirements. 
For a device that does support the 664 * BURST_CAPTURE capability, this list may be empty, if all supported resolutions meet the 20 665 * fps requirement.</p> 666 * 667 * @return an array of supported slower high-resolution sizes, or {@code null} if the 668 * BURST_CAPTURE capability is not supported 669 */ getHighResolutionOutputSizes(int format)670 public Size[] getHighResolutionOutputSizes(int format) { 671 if (!mListHighResolution) return null; 672 673 return getPublicFormatSizes(format, /*output*/true, /*highRes*/ true); 674 } 675 676 /** 677 * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration} 678 * for the format/size combination (in nanoseconds). 679 * 680 * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p> 681 * <p>{@code size} should be one of the ones returned by 682 * {@link #getOutputSizes(int)}.</p> 683 * 684 * <p>This should correspond to the frame duration when only that stream is active, with all 685 * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}. 686 * </p> 687 * 688 * <p>When multiple streams are used in a request, the minimum frame duration will be 689 * {@code max(individual stream min durations)}.</p> 690 * 691 * <p>For devices that do not support manual sensor control 692 * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}), 693 * this function may return 0.</p> 694 * 695 * <!-- 696 * TODO: uncomment after adding input stream support 697 * <p>The minimum frame duration of a stream (of a particular format, size) is the same 698 * regardless of whether the stream is input or output.</p> 699 * --> 700 * 701 * @param format an image format from {@link ImageFormat} or {@link PixelFormat} 702 * @param size an output-compatible size 703 * @return a minimum frame duration {@code >} 0 in nanoseconds, or 704 * 0 if the minimum frame duration is not available. 
705 * 706 * @throws IllegalArgumentException if {@code format} or {@code size} was not supported 707 * @throws NullPointerException if {@code size} was {@code null} 708 * 709 * @see CaptureRequest#SENSOR_FRAME_DURATION 710 * @see #getOutputStallDuration(int, Size) 711 * @see ImageFormat 712 * @see PixelFormat 713 */ getOutputMinFrameDuration(int format, Size size)714 public long getOutputMinFrameDuration(int format, Size size) { 715 checkNotNull(size, "size must not be null"); 716 checkArgumentFormatSupported(format, /*output*/true); 717 718 return getInternalFormatDuration(imageFormatToInternal(format), 719 imageFormatToDataspace(format), 720 size, 721 DURATION_MIN_FRAME); 722 } 723 724 /** 725 * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration} 726 * for the class/size combination (in nanoseconds). 727 * 728 * <p>This assumes a the {@code klass} is set up to use {@link ImageFormat#PRIVATE}. 729 * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p> 730 * 731 * <p>{@code klass} should be one of the ones which is supported by 732 * {@link #isOutputSupportedFor(Class)}.</p> 733 * 734 * <p>{@code size} should be one of the ones returned by 735 * {@link #getOutputSizes(int)}.</p> 736 * 737 * <p>This should correspond to the frame duration when only that stream is active, with all 738 * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}. 
739 * </p> 740 * 741 * <p>When multiple streams are used in a request, the minimum frame duration will be 742 * {@code max(individual stream min durations)}.</p> 743 * 744 * <p>For devices that do not support manual sensor control 745 * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}), 746 * this function may return 0.</p> 747 * 748 * <!-- 749 * TODO: uncomment after adding input stream support 750 * <p>The minimum frame duration of a stream (of a particular format, size) is the same 751 * regardless of whether the stream is input or output.</p> 752 * --> 753 * 754 * @param klass 755 * a class which is supported by {@link #isOutputSupportedFor(Class)} and has a 756 * non-empty array returned by {@link #getOutputSizes(Class)} 757 * @param size an output-compatible size 758 * @return a minimum frame duration {@code >} 0 in nanoseconds, or 759 * 0 if the minimum frame duration is not available. 760 * 761 * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported 762 * @throws NullPointerException if {@code size} or {@code klass} was {@code null} 763 * 764 * @see CaptureRequest#SENSOR_FRAME_DURATION 765 * @see ImageFormat 766 * @see PixelFormat 767 */ getOutputMinFrameDuration(final Class<T> klass, final Size size)768 public <T> long getOutputMinFrameDuration(final Class<T> klass, final Size size) { 769 if (!isOutputSupportedFor(klass)) { 770 throw new IllegalArgumentException("klass was not supported"); 771 } 772 773 return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 774 HAL_DATASPACE_UNKNOWN, 775 size, DURATION_MIN_FRAME); 776 } 777 778 /** 779 * Get the stall duration for the format/size combination (in nanoseconds). 
780 * 781 * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p> 782 * <p>{@code size} should be one of the ones returned by 783 * {@link #getOutputSizes(int)}.</p> 784 * 785 * <p> 786 * A stall duration is how much extra time would get added to the normal minimum frame duration 787 * for a repeating request that has streams with non-zero stall. 788 * 789 * <p>For example, consider JPEG captures which have the following characteristics: 790 * 791 * <ul> 792 * <li>JPEG streams act like processed YUV streams in requests for which they are not included; 793 * in requests in which they are directly referenced, they act as JPEG streams. 794 * This is because supporting a JPEG stream requires the underlying YUV data to always be ready 795 * for use by a JPEG encoder, but the encoder will only be used (and impact frame duration) on 796 * requests that actually reference a JPEG stream. 797 * <li>The JPEG processor can run concurrently to the rest of the camera pipeline, but cannot 798 * process more than 1 capture at a time. 799 * </ul> 800 * 801 * <p>In other words, using a repeating YUV request would result in a steady frame rate 802 * (let's say it's 30 FPS). If a single JPEG request is submitted periodically, 803 * the frame rate will stay at 30 FPS (as long as we wait for the previous JPEG to return each 804 * time). 
If we try to submit a repeating YUV + JPEG request, then the frame rate will drop from 805 * 30 FPS.</p> 806 * 807 * <p>In general, submitting a new request with a non-0 stall time stream will <em>not</em> cause a 808 * frame rate drop unless there are still outstanding buffers for that stream from previous 809 * requests.</p> 810 * 811 * <p>Submitting a repeating request with streams (call this {@code S}) is the same as setting 812 * the minimum frame duration from the normal minimum frame duration corresponding to {@code S}, 813 * added with the maximum stall duration for {@code S}.</p> 814 * 815 * <p>If interleaving requests with and without a stall duration, a request will stall by the 816 * maximum of the remaining times for each can-stall stream with outstanding buffers.</p> 817 * 818 * <p>This means that a stalling request will not have an exposure start until the stall has 819 * completed.</p> 820 * 821 * <p>This should correspond to the stall duration when only that stream is active, with all 822 * processing (typically in {@code android.*.mode}) set to {@code FAST} or {@code OFF}. 
823 * Setting any of the processing modes to {@code HIGH_QUALITY} effectively results in an 824 * indeterminate stall duration for all streams in a request (the regular stall calculation 825 * rules are ignored).</p> 826 * 827 * <p>The following formats may always have a stall duration: 828 * <ul> 829 * <li>{@link ImageFormat#JPEG JPEG} 830 * <li>{@link ImageFormat#RAW_SENSOR RAW16} 831 * <li>{@link ImageFormat#RAW_PRIVATE RAW_PRIVATE} 832 * </ul> 833 * </p> 834 * 835 * <p>The following formats will never have a stall duration: 836 * <ul> 837 * <li>{@link ImageFormat#YUV_420_888 YUV_420_888} 838 * <li>{@link #isOutputSupportedFor(Class) Implementation-Defined} 839 * </ul></p> 840 * 841 * <p> 842 * All other formats may or may not have an allowed stall duration on a per-capability basis; 843 * refer to {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES 844 * android.request.availableCapabilities} for more details.</p> 845 * </p> 846 * 847 * <p>See {@link CaptureRequest#SENSOR_FRAME_DURATION android.sensor.frameDuration} 848 * for more information about calculating the max frame rate (absent stalls).</p> 849 * 850 * @param format an image format from {@link ImageFormat} or {@link PixelFormat} 851 * @param size an output-compatible size 852 * @return a stall duration {@code >=} 0 in nanoseconds 853 * 854 * @throws IllegalArgumentException if {@code format} or {@code size} was not supported 855 * @throws NullPointerException if {@code size} was {@code null} 856 * 857 * @see CaptureRequest#SENSOR_FRAME_DURATION 858 * @see ImageFormat 859 * @see PixelFormat 860 */ getOutputStallDuration(int format, Size size)861 public long getOutputStallDuration(int format, Size size) { 862 checkArgumentFormatSupported(format, /*output*/true); 863 864 return getInternalFormatDuration(imageFormatToInternal(format), 865 imageFormatToDataspace(format), 866 size, 867 DURATION_STALL); 868 } 869 870 /** 871 * Get the stall duration for the class/size combination (in nanoseconds). 
872 * 873 * <p>This assumes a the {@code klass} is set up to use {@link ImageFormat#PRIVATE}. 874 * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p> 875 * 876 * <p>{@code klass} should be one of the ones with a non-empty array returned by 877 * {@link #getOutputSizes(Class)}.</p> 878 * 879 * <p>{@code size} should be one of the ones returned by 880 * {@link #getOutputSizes(Class)}.</p> 881 * 882 * <p>See {@link #getOutputStallDuration(int, Size)} for a definition of a 883 * <em>stall duration</em>.</p> 884 * 885 * @param klass 886 * a class which is supported by {@link #isOutputSupportedFor(Class)} and has a 887 * non-empty array returned by {@link #getOutputSizes(Class)} 888 * @param size an output-compatible size 889 * @return a minimum frame duration {@code >=} 0 in nanoseconds 890 * 891 * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported 892 * @throws NullPointerException if {@code size} or {@code klass} was {@code null} 893 * 894 * @see CaptureRequest#SENSOR_FRAME_DURATION 895 * @see ImageFormat 896 * @see PixelFormat 897 */ getOutputStallDuration(final Class<T> klass, final Size size)898 public <T> long getOutputStallDuration(final Class<T> klass, final Size size) { 899 if (!isOutputSupportedFor(klass)) { 900 throw new IllegalArgumentException("klass was not supported"); 901 } 902 903 return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 904 HAL_DATASPACE_UNKNOWN, size, DURATION_STALL); 905 } 906 907 /** 908 * Check if this {@link StreamConfigurationMap} is equal to another 909 * {@link StreamConfigurationMap}. 
910 * 911 * <p>Two vectors are only equal if and only if each of the respective elements is equal.</p> 912 * 913 * @return {@code true} if the objects were equal, {@code false} otherwise 914 */ 915 @Override equals(final Object obj)916 public boolean equals(final Object obj) { 917 if (obj == null) { 918 return false; 919 } 920 if (this == obj) { 921 return true; 922 } 923 if (obj instanceof StreamConfigurationMap) { 924 final StreamConfigurationMap other = (StreamConfigurationMap) obj; 925 // XX: do we care about order? 926 return Arrays.equals(mConfigurations, other.mConfigurations) && 927 Arrays.equals(mMinFrameDurations, other.mMinFrameDurations) && 928 Arrays.equals(mStallDurations, other.mStallDurations) && 929 Arrays.equals(mDepthConfigurations, other.mDepthConfigurations) && 930 Arrays.equals(mHighSpeedVideoConfigurations, 931 other.mHighSpeedVideoConfigurations); 932 } 933 return false; 934 } 935 936 /** 937 * {@inheritDoc} 938 */ 939 @Override hashCode()940 public int hashCode() { 941 // XX: do we care about order? 
942 return HashCodeHelpers.hashCodeGeneric( 943 mConfigurations, mMinFrameDurations, 944 mStallDurations, 945 mDepthConfigurations, mHighSpeedVideoConfigurations); 946 } 947 948 // Check that the argument is supported by #getOutputFormats or #getInputFormats checkArgumentFormatSupported(int format, boolean output)949 private int checkArgumentFormatSupported(int format, boolean output) { 950 checkArgumentFormat(format); 951 952 int internalFormat = imageFormatToInternal(format); 953 int internalDataspace = imageFormatToDataspace(format); 954 955 if (output) { 956 if (internalDataspace == HAL_DATASPACE_DEPTH) { 957 if (mDepthOutputFormats.indexOfKey(internalFormat) >= 0) { 958 return format; 959 } 960 } else { 961 if (mAllOutputFormats.indexOfKey(internalFormat) >= 0) { 962 return format; 963 } 964 } 965 } else { 966 if (mInputFormats.indexOfKey(internalFormat) >= 0) { 967 return format; 968 } 969 } 970 971 throw new IllegalArgumentException(String.format( 972 "format %x is not supported by this stream configuration map", format)); 973 } 974 975 /** 976 * Ensures that the format is either user-defined or implementation defined. 
977 * 978 * <p>If a format has a different internal representation than the public representation, 979 * passing in the public representation here will fail.</p> 980 * 981 * <p>For example if trying to use {@link ImageFormat#JPEG}: 982 * it has a different public representation than the internal representation 983 * {@code HAL_PIXEL_FORMAT_BLOB}, this check will fail.</p> 984 * 985 * <p>Any invalid/undefined formats will raise an exception.</p> 986 * 987 * @param format image format 988 * @return the format 989 * 990 * @throws IllegalArgumentException if the format was invalid 991 */ checkArgumentFormatInternal(int format)992 static int checkArgumentFormatInternal(int format) { 993 switch (format) { 994 case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: 995 case HAL_PIXEL_FORMAT_BLOB: 996 case HAL_PIXEL_FORMAT_RAW_OPAQUE: 997 case HAL_PIXEL_FORMAT_Y16: 998 return format; 999 case ImageFormat.JPEG: 1000 throw new IllegalArgumentException( 1001 "ImageFormat.JPEG is an unknown internal format"); 1002 default: 1003 return checkArgumentFormat(format); 1004 } 1005 } 1006 1007 /** 1008 * Ensures that the format is publicly user-defined in either ImageFormat or PixelFormat. 1009 * 1010 * <p>If a format has a different public representation than the internal representation, 1011 * passing in the internal representation here will fail.</p> 1012 * 1013 * <p>For example if trying to use {@code HAL_PIXEL_FORMAT_BLOB}: 1014 * it has a different internal representation than the public representation 1015 * {@link ImageFormat#JPEG}, this check will fail.</p> 1016 * 1017 * <p>Any invalid/undefined formats will raise an exception, including implementation-defined. 
1018 * </p> 1019 * 1020 * <p>Note that {@code @hide} and deprecated formats will not pass this check.</p> 1021 * 1022 * @param format image format 1023 * @return the format 1024 * 1025 * @throws IllegalArgumentException if the format was not user-defined 1026 */ checkArgumentFormat(int format)1027 static int checkArgumentFormat(int format) { 1028 if (!ImageFormat.isPublicFormat(format) && !PixelFormat.isPublicFormat(format)) { 1029 throw new IllegalArgumentException(String.format( 1030 "format 0x%x was not defined in either ImageFormat or PixelFormat", format)); 1031 } 1032 1033 return format; 1034 } 1035 1036 /** 1037 * Convert an internal format compatible with {@code graphics.h} into public-visible 1038 * {@code ImageFormat}. This assumes the dataspace of the format is not HAL_DATASPACE_DEPTH. 1039 * 1040 * <p>In particular these formats are converted: 1041 * <ul> 1042 * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.JPEG</li> 1043 * </ul> 1044 * </p> 1045 * 1046 * <p>Passing in a format which has no public equivalent will fail; 1047 * as will passing in a public format which has a different internal format equivalent. 
1048 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1049 * 1050 * <p>All other formats are returned as-is, no further invalid check is performed.</p> 1051 * 1052 * <p>This function is the dual of {@link #imageFormatToInternal} for dataspaces other than 1053 * HAL_DATASPACE_DEPTH.</p> 1054 * 1055 * @param format image format from {@link ImageFormat} or {@link PixelFormat} 1056 * @return the converted image formats 1057 * 1058 * @throws IllegalArgumentException 1059 * if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or 1060 * {@link ImageFormat#JPEG} 1061 * 1062 * @see ImageFormat 1063 * @see PixelFormat 1064 * @see #checkArgumentFormat 1065 */ imageFormatToPublic(int format)1066 static int imageFormatToPublic(int format) { 1067 switch (format) { 1068 case HAL_PIXEL_FORMAT_BLOB: 1069 return ImageFormat.JPEG; 1070 case ImageFormat.JPEG: 1071 throw new IllegalArgumentException( 1072 "ImageFormat.JPEG is an unknown internal format"); 1073 default: 1074 return format; 1075 } 1076 } 1077 1078 /** 1079 * Convert an internal format compatible with {@code graphics.h} into public-visible 1080 * {@code ImageFormat}. This assumes the dataspace of the format is HAL_DATASPACE_DEPTH. 1081 * 1082 * <p>In particular these formats are converted: 1083 * <ul> 1084 * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.DEPTH_POINT_CLOUD 1085 * <li>HAL_PIXEL_FORMAT_Y16 => ImageFormat.DEPTH16 1086 * </ul> 1087 * </p> 1088 * 1089 * <p>Passing in an implementation-defined format which has no public equivalent will fail; 1090 * as will passing in a public format which has a different internal format equivalent. 
1091 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1092 * 1093 * <p>All other formats are returned as-is, no further invalid check is performed.</p> 1094 * 1095 * <p>This function is the dual of {@link #imageFormatToInternal} for formats associated with 1096 * HAL_DATASPACE_DEPTH.</p> 1097 * 1098 * @param format image format from {@link ImageFormat} or {@link PixelFormat} 1099 * @return the converted image formats 1100 * 1101 * @throws IllegalArgumentException 1102 * if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or 1103 * {@link ImageFormat#JPEG} 1104 * 1105 * @see ImageFormat 1106 * @see PixelFormat 1107 * @see #checkArgumentFormat 1108 */ depthFormatToPublic(int format)1109 static int depthFormatToPublic(int format) { 1110 switch (format) { 1111 case HAL_PIXEL_FORMAT_BLOB: 1112 return ImageFormat.DEPTH_POINT_CLOUD; 1113 case HAL_PIXEL_FORMAT_Y16: 1114 return ImageFormat.DEPTH16; 1115 case HAL_PIXEL_FORMAT_RAW16: 1116 return ImageFormat.RAW_DEPTH; 1117 case ImageFormat.JPEG: 1118 throw new IllegalArgumentException( 1119 "ImageFormat.JPEG is an unknown internal format"); 1120 case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: 1121 throw new IllegalArgumentException( 1122 "IMPLEMENTATION_DEFINED must not leak to public API"); 1123 default: 1124 throw new IllegalArgumentException( 1125 "Unknown DATASPACE_DEPTH format " + format); 1126 } 1127 } 1128 1129 /** 1130 * Convert image formats from internal to public formats (in-place). 
1131 * 1132 * @param formats an array of image formats 1133 * @return {@code formats} 1134 * 1135 * @see #imageFormatToPublic 1136 */ imageFormatToPublic(int[] formats)1137 static int[] imageFormatToPublic(int[] formats) { 1138 if (formats == null) { 1139 return null; 1140 } 1141 1142 for (int i = 0; i < formats.length; ++i) { 1143 formats[i] = imageFormatToPublic(formats[i]); 1144 } 1145 1146 return formats; 1147 } 1148 1149 /** 1150 * Convert a public format compatible with {@code ImageFormat} to an internal format 1151 * from {@code graphics.h}. 1152 * 1153 * <p>In particular these formats are converted: 1154 * <ul> 1155 * <li>ImageFormat.JPEG => HAL_PIXEL_FORMAT_BLOB 1156 * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_PIXEL_FORMAT_BLOB 1157 * <li>ImageFormat.DEPTH16 => HAL_PIXEL_FORMAT_Y16 1158 * </ul> 1159 * </p> 1160 * 1161 * <p>Passing in an internal format which has a different public format equivalent will fail. 1162 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1163 * 1164 * <p>All other formats are returned as-is, no invalid check is performed.</p> 1165 * 1166 * <p>This function is the dual of {@link #imageFormatToPublic}.</p> 1167 * 1168 * @param format public image format from {@link ImageFormat} or {@link PixelFormat} 1169 * @return the converted image formats 1170 * 1171 * @see ImageFormat 1172 * @see PixelFormat 1173 * 1174 * @throws IllegalArgumentException 1175 * if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} 1176 */ imageFormatToInternal(int format)1177 static int imageFormatToInternal(int format) { 1178 switch (format) { 1179 case ImageFormat.JPEG: 1180 case ImageFormat.DEPTH_POINT_CLOUD: 1181 return HAL_PIXEL_FORMAT_BLOB; 1182 case ImageFormat.DEPTH16: 1183 return HAL_PIXEL_FORMAT_Y16; 1184 case ImageFormat.RAW_DEPTH: 1185 return HAL_PIXEL_FORMAT_RAW16; 1186 default: 1187 return format; 1188 } 1189 } 1190 1191 /** 1192 * Convert a public format compatible with {@code ImageFormat} to an 
internal dataspace 1193 * from {@code graphics.h}. 1194 * 1195 * <p>In particular these formats are converted: 1196 * <ul> 1197 * <li>ImageFormat.JPEG => HAL_DATASPACE_V0_JFIF 1198 * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_DATASPACE_DEPTH 1199 * <li>ImageFormat.DEPTH16 => HAL_DATASPACE_DEPTH 1200 * <li>others => HAL_DATASPACE_UNKNOWN 1201 * </ul> 1202 * </p> 1203 * 1204 * <p>Passing in an implementation-defined format here will fail (it's not a public format); 1205 * as will passing in an internal format which has a different public format equivalent. 1206 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1207 * 1208 * <p>All other formats are returned as-is, no invalid check is performed.</p> 1209 * 1210 * <p>This function is the dual of {@link #imageFormatToPublic}.</p> 1211 * 1212 * @param format public image format from {@link ImageFormat} or {@link PixelFormat} 1213 * @return the converted image formats 1214 * 1215 * @see ImageFormat 1216 * @see PixelFormat 1217 * 1218 * @throws IllegalArgumentException 1219 * if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} 1220 */ imageFormatToDataspace(int format)1221 static int imageFormatToDataspace(int format) { 1222 switch (format) { 1223 case ImageFormat.JPEG: 1224 return HAL_DATASPACE_V0_JFIF; 1225 case ImageFormat.DEPTH_POINT_CLOUD: 1226 case ImageFormat.DEPTH16: 1227 case ImageFormat.RAW_DEPTH: 1228 return HAL_DATASPACE_DEPTH; 1229 default: 1230 return HAL_DATASPACE_UNKNOWN; 1231 } 1232 } 1233 1234 /** 1235 * Convert image formats from public to internal formats (in-place). 
1236 * 1237 * @param formats an array of image formats 1238 * @return {@code formats} 1239 * 1240 * @see #imageFormatToInternal 1241 * 1242 * @hide 1243 */ imageFormatToInternal(int[] formats)1244 public static int[] imageFormatToInternal(int[] formats) { 1245 if (formats == null) { 1246 return null; 1247 } 1248 1249 for (int i = 0; i < formats.length; ++i) { 1250 formats[i] = imageFormatToInternal(formats[i]); 1251 } 1252 1253 return formats; 1254 } 1255 getPublicFormatSizes(int format, boolean output, boolean highRes)1256 private Size[] getPublicFormatSizes(int format, boolean output, boolean highRes) { 1257 try { 1258 checkArgumentFormatSupported(format, output); 1259 } catch (IllegalArgumentException e) { 1260 return null; 1261 } 1262 1263 int internalFormat = imageFormatToInternal(format); 1264 int dataspace = imageFormatToDataspace(format); 1265 1266 return getInternalFormatSizes(internalFormat, dataspace, output, highRes); 1267 } 1268 getInternalFormatSizes(int format, int dataspace, boolean output, boolean highRes)1269 private Size[] getInternalFormatSizes(int format, int dataspace, 1270 boolean output, boolean highRes) { 1271 // All depth formats are non-high-res. 1272 if (dataspace == HAL_DATASPACE_DEPTH && highRes) { 1273 return new Size[0]; 1274 } 1275 1276 SparseIntArray formatsMap = 1277 !output ? mInputFormats : 1278 dataspace == HAL_DATASPACE_DEPTH ? mDepthOutputFormats : 1279 highRes ? mHighResOutputFormats : 1280 mOutputFormats; 1281 1282 int sizesCount = formatsMap.get(format); 1283 if ( ((!output || dataspace == HAL_DATASPACE_DEPTH) && sizesCount == 0) || 1284 (output && dataspace != HAL_DATASPACE_DEPTH && mAllOutputFormats.get(format) == 0)) { 1285 // Only throw if this is really not supported at all 1286 throw new IllegalArgumentException("format not available"); 1287 } 1288 1289 Size[] sizes = new Size[sizesCount]; 1290 int sizeIndex = 0; 1291 1292 StreamConfiguration[] configurations = 1293 (dataspace == HAL_DATASPACE_DEPTH) ? 
mDepthConfigurations : mConfigurations; 1294 StreamConfigurationDuration[] minFrameDurations = 1295 (dataspace == HAL_DATASPACE_DEPTH) ? mDepthMinFrameDurations : mMinFrameDurations; 1296 1297 for (StreamConfiguration config : configurations) { 1298 int fmt = config.getFormat(); 1299 if (fmt == format && config.isOutput() == output) { 1300 if (output && mListHighResolution) { 1301 // Filter slow high-res output formats; include for 1302 // highRes, remove for !highRes 1303 long duration = 0; 1304 for (int i = 0; i < minFrameDurations.length; i++) { 1305 StreamConfigurationDuration d = minFrameDurations[i]; 1306 if (d.getFormat() == fmt && 1307 d.getWidth() == config.getSize().getWidth() && 1308 d.getHeight() == config.getSize().getHeight()) { 1309 duration = d.getDuration(); 1310 break; 1311 } 1312 } 1313 if (dataspace != HAL_DATASPACE_DEPTH && 1314 highRes != (duration > DURATION_20FPS_NS)) { 1315 continue; 1316 } 1317 } 1318 sizes[sizeIndex++] = config.getSize(); 1319 } 1320 } 1321 1322 if (sizeIndex != sizesCount) { 1323 throw new AssertionError( 1324 "Too few sizes (expected " + sizesCount + ", actual " + sizeIndex + ")"); 1325 } 1326 1327 return sizes; 1328 } 1329 1330 /** Get the list of publically visible output formats; does not include IMPL_DEFINED */ getPublicFormats(boolean output)1331 private int[] getPublicFormats(boolean output) { 1332 int[] formats = new int[getPublicFormatCount(output)]; 1333 1334 int i = 0; 1335 1336 SparseIntArray map = getFormatsMap(output); 1337 for (int j = 0; j < map.size(); j++) { 1338 int format = map.keyAt(j); 1339 formats[i++] = imageFormatToPublic(format); 1340 } 1341 if (output) { 1342 for (int j = 0; j < mDepthOutputFormats.size(); j++) { 1343 formats[i++] = depthFormatToPublic(mDepthOutputFormats.keyAt(j)); 1344 } 1345 } 1346 if (formats.length != i) { 1347 throw new AssertionError("Too few formats " + i + ", expected " + formats.length); 1348 } 1349 1350 return formats; 1351 } 1352 1353 /** Get the format -> size 
count map for either output or input formats */ getFormatsMap(boolean output)1354 private SparseIntArray getFormatsMap(boolean output) { 1355 return output ? mAllOutputFormats : mInputFormats; 1356 } 1357 getInternalFormatDuration(int format, int dataspace, Size size, int duration)1358 private long getInternalFormatDuration(int format, int dataspace, Size size, int duration) { 1359 // assume format is already checked, since its internal 1360 1361 if (!isSupportedInternalConfiguration(format, dataspace, size)) { 1362 throw new IllegalArgumentException("size was not supported"); 1363 } 1364 1365 StreamConfigurationDuration[] durations = getDurations(duration, dataspace); 1366 1367 for (StreamConfigurationDuration configurationDuration : durations) { 1368 if (configurationDuration.getFormat() == format && 1369 configurationDuration.getWidth() == size.getWidth() && 1370 configurationDuration.getHeight() == size.getHeight()) { 1371 return configurationDuration.getDuration(); 1372 } 1373 } 1374 // Default duration is '0' (unsupported/no extra stall) 1375 return 0; 1376 } 1377 1378 /** 1379 * Get the durations array for the kind of duration 1380 * 1381 * @see #DURATION_MIN_FRAME 1382 * @see #DURATION_STALL 1383 * */ getDurations(int duration, int dataspace)1384 private StreamConfigurationDuration[] getDurations(int duration, int dataspace) { 1385 switch (duration) { 1386 case DURATION_MIN_FRAME: 1387 return (dataspace == HAL_DATASPACE_DEPTH) ? 1388 mDepthMinFrameDurations : mMinFrameDurations; 1389 case DURATION_STALL: 1390 return (dataspace == HAL_DATASPACE_DEPTH) ? 
1391 mDepthStallDurations : mStallDurations; 1392 default: 1393 throw new IllegalArgumentException("duration was invalid"); 1394 } 1395 } 1396 1397 /** Count the number of publicly-visible output formats */ getPublicFormatCount(boolean output)1398 private int getPublicFormatCount(boolean output) { 1399 SparseIntArray formatsMap = getFormatsMap(output); 1400 int size = formatsMap.size(); 1401 if (output) { 1402 size += mDepthOutputFormats.size(); 1403 } 1404 1405 return size; 1406 } 1407 arrayContains(T[] array, T element)1408 private static <T> boolean arrayContains(T[] array, T element) { 1409 if (array == null) { 1410 return false; 1411 } 1412 1413 for (T el : array) { 1414 if (Objects.equals(el, element)) { 1415 return true; 1416 } 1417 } 1418 1419 return false; 1420 } 1421 isSupportedInternalConfiguration(int format, int dataspace, Size size)1422 private boolean isSupportedInternalConfiguration(int format, int dataspace, 1423 Size size) { 1424 StreamConfiguration[] configurations = 1425 (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations; 1426 1427 for (int i = 0; i < configurations.length; i++) { 1428 if (configurations[i].getFormat() == format && 1429 configurations[i].getSize().equals(size)) { 1430 return true; 1431 } 1432 } 1433 1434 return false; 1435 } 1436 1437 /** 1438 * Return this {@link StreamConfigurationMap} as a string representation. 1439 * 1440 * <p>{@code "StreamConfigurationMap(Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d, 1441 * stall:%d], ... [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]), Inputs([w:%d, h:%d, 1442 * format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)]), ValidOutputFormatsForInput( 1443 * [in:%d, out:%d, ... %d], ... [in:%d, out:%d, ... %d]), HighSpeedVideoConfigurations( 1444 * [w:%d, h:%d, min_fps:%d, max_fps:%d], ... [w:%d, h:%d, min_fps:%d, max_fps:%d]))"}.</p> 1445 * 1446 * <p>{@code Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d], ... 
1447 * [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d])}, where 1448 * {@code [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]} represents an output 1449 * configuration's width, height, format, minimal frame duration in nanoseconds, and stall 1450 * duration in nanoseconds.</p> 1451 * 1452 * <p>{@code Inputs([w:%d, h:%d, format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)])}, where 1453 * {@code [w:%d, h:%d, format:%s(%d)]} represents an input configuration's width, height, and 1454 * format.</p> 1455 * 1456 * <p>{@code ValidOutputFormatsForInput([in:%s(%d), out:%s(%d), ... %s(%d)], 1457 * ... [in:%s(%d), out:%s(%d), ... %s(%d)])}, where {@code [in:%s(%d), out:%s(%d), ... %s(%d)]} 1458 * represents an input fomat and its valid output formats.</p> 1459 * 1460 * <p>{@code HighSpeedVideoConfigurations([w:%d, h:%d, min_fps:%d, max_fps:%d], 1461 * ... [w:%d, h:%d, min_fps:%d, max_fps:%d])}, where 1462 * {@code [w:%d, h:%d, min_fps:%d, max_fps:%d]} represents a high speed video output 1463 * configuration's width, height, minimal frame rate, and maximal frame rate.</p> 1464 * 1465 * @return string representation of {@link StreamConfigurationMap} 1466 */ 1467 @Override toString()1468 public String toString() { 1469 StringBuilder sb = new StringBuilder("StreamConfiguration("); 1470 appendOutputsString(sb); 1471 sb.append(", "); 1472 appendHighResOutputsString(sb); 1473 sb.append(", "); 1474 appendInputsString(sb); 1475 sb.append(", "); 1476 appendValidOutputFormatsForInputString(sb); 1477 sb.append(", "); 1478 appendHighSpeedVideoConfigurationsString(sb); 1479 sb.append(")"); 1480 1481 return sb.toString(); 1482 } 1483 appendOutputsString(StringBuilder sb)1484 private void appendOutputsString(StringBuilder sb) { 1485 sb.append("Outputs("); 1486 int[] formats = getOutputFormats(); 1487 for (int format : formats) { 1488 Size[] sizes = getOutputSizes(format); 1489 for (Size size : sizes) { 1490 long minFrameDuration = getOutputMinFrameDuration(format, size); 1491 
long stallDuration = getOutputStallDuration(format, size); 1492 sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " + 1493 "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format), 1494 format, minFrameDuration, stallDuration)); 1495 } 1496 } 1497 // Remove the pending ", " 1498 if (sb.charAt(sb.length() - 1) == ' ') { 1499 sb.delete(sb.length() - 2, sb.length()); 1500 } 1501 sb.append(")"); 1502 } 1503 appendHighResOutputsString(StringBuilder sb)1504 private void appendHighResOutputsString(StringBuilder sb) { 1505 sb.append("HighResolutionOutputs("); 1506 int[] formats = getOutputFormats(); 1507 for (int format : formats) { 1508 Size[] sizes = getHighResolutionOutputSizes(format); 1509 if (sizes == null) continue; 1510 for (Size size : sizes) { 1511 long minFrameDuration = getOutputMinFrameDuration(format, size); 1512 long stallDuration = getOutputStallDuration(format, size); 1513 sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " + 1514 "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format), 1515 format, minFrameDuration, stallDuration)); 1516 } 1517 } 1518 // Remove the pending ", " 1519 if (sb.charAt(sb.length() - 1) == ' ') { 1520 sb.delete(sb.length() - 2, sb.length()); 1521 } 1522 sb.append(")"); 1523 } 1524 appendInputsString(StringBuilder sb)1525 private void appendInputsString(StringBuilder sb) { 1526 sb.append("Inputs("); 1527 int[] formats = getInputFormats(); 1528 for (int format : formats) { 1529 Size[] sizes = getInputSizes(format); 1530 for (Size size : sizes) { 1531 sb.append(String.format("[w:%d, h:%d, format:%s(%d)], ", size.getWidth(), 1532 size.getHeight(), formatToString(format), format)); 1533 } 1534 } 1535 // Remove the pending ", " 1536 if (sb.charAt(sb.length() - 1) == ' ') { 1537 sb.delete(sb.length() - 2, sb.length()); 1538 } 1539 sb.append(")"); 1540 } 1541 appendValidOutputFormatsForInputString(StringBuilder sb)1542 private void 
appendValidOutputFormatsForInputString(StringBuilder sb) { 1543 sb.append("ValidOutputFormatsForInput("); 1544 int[] inputFormats = getInputFormats(); 1545 for (int inputFormat : inputFormats) { 1546 sb.append(String.format("[in:%s(%d), out:", formatToString(inputFormat), inputFormat)); 1547 int[] outputFormats = getValidOutputFormatsForInput(inputFormat); 1548 for (int i = 0; i < outputFormats.length; i++) { 1549 sb.append(String.format("%s(%d)", formatToString(outputFormats[i]), 1550 outputFormats[i])); 1551 if (i < outputFormats.length - 1) { 1552 sb.append(", "); 1553 } 1554 } 1555 sb.append("], "); 1556 } 1557 // Remove the pending ", " 1558 if (sb.charAt(sb.length() - 1) == ' ') { 1559 sb.delete(sb.length() - 2, sb.length()); 1560 } 1561 sb.append(")"); 1562 } 1563 appendHighSpeedVideoConfigurationsString(StringBuilder sb)1564 private void appendHighSpeedVideoConfigurationsString(StringBuilder sb) { 1565 sb.append("HighSpeedVideoConfigurations("); 1566 Size[] sizes = getHighSpeedVideoSizes(); 1567 for (Size size : sizes) { 1568 Range<Integer>[] ranges = getHighSpeedVideoFpsRangesFor(size); 1569 for (Range<Integer> range : ranges) { 1570 sb.append(String.format("[w:%d, h:%d, min_fps:%d, max_fps:%d], ", size.getWidth(), 1571 size.getHeight(), range.getLower(), range.getUpper())); 1572 } 1573 } 1574 // Remove the pending ", " 1575 if (sb.charAt(sb.length() - 1) == ' ') { 1576 sb.delete(sb.length() - 2, sb.length()); 1577 } 1578 sb.append(")"); 1579 } 1580 formatToString(int format)1581 private String formatToString(int format) { 1582 switch (format) { 1583 case ImageFormat.YV12: 1584 return "YV12"; 1585 case ImageFormat.YUV_420_888: 1586 return "YUV_420_888"; 1587 case ImageFormat.NV21: 1588 return "NV21"; 1589 case ImageFormat.NV16: 1590 return "NV16"; 1591 case PixelFormat.RGB_565: 1592 return "RGB_565"; 1593 case PixelFormat.RGBA_8888: 1594 return "RGBA_8888"; 1595 case PixelFormat.RGBX_8888: 1596 return "RGBX_8888"; 1597 case PixelFormat.RGB_888: 1598 return 
"RGB_888"; 1599 case ImageFormat.JPEG: 1600 return "JPEG"; 1601 case ImageFormat.YUY2: 1602 return "YUY2"; 1603 case ImageFormat.Y8: 1604 return "Y8"; 1605 case ImageFormat.Y16: 1606 return "Y16"; 1607 case ImageFormat.RAW_SENSOR: 1608 return "RAW_SENSOR"; 1609 case ImageFormat.RAW_PRIVATE: 1610 return "RAW_PRIVATE"; 1611 case ImageFormat.RAW10: 1612 return "RAW10"; 1613 case ImageFormat.DEPTH16: 1614 return "DEPTH16"; 1615 case ImageFormat.DEPTH_POINT_CLOUD: 1616 return "DEPTH_POINT_CLOUD"; 1617 case ImageFormat.RAW_DEPTH: 1618 return "RAW_DEPTH"; 1619 case ImageFormat.PRIVATE: 1620 return "PRIVATE"; 1621 default: 1622 return "UNKNOWN"; 1623 } 1624 } 1625 1626 // from system/core/include/system/graphics.h 1627 private static final int HAL_PIXEL_FORMAT_RAW16 = 0x20; 1628 private static final int HAL_PIXEL_FORMAT_BLOB = 0x21; 1629 private static final int HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22; 1630 private static final int HAL_PIXEL_FORMAT_YCbCr_420_888 = 0x23; 1631 private static final int HAL_PIXEL_FORMAT_RAW_OPAQUE = 0x24; 1632 private static final int HAL_PIXEL_FORMAT_RAW10 = 0x25; 1633 private static final int HAL_PIXEL_FORMAT_RAW12 = 0x26; 1634 private static final int HAL_PIXEL_FORMAT_Y16 = 0x20363159; 1635 1636 1637 private static final int HAL_DATASPACE_STANDARD_SHIFT = 16; 1638 private static final int HAL_DATASPACE_TRANSFER_SHIFT = 22; 1639 private static final int HAL_DATASPACE_RANGE_SHIFT = 27; 1640 1641 private static final int HAL_DATASPACE_UNKNOWN = 0x0; 1642 private static final int HAL_DATASPACE_V0_JFIF = 1643 (2 << HAL_DATASPACE_STANDARD_SHIFT) | 1644 (3 << HAL_DATASPACE_TRANSFER_SHIFT) | 1645 (1 << HAL_DATASPACE_RANGE_SHIFT); 1646 1647 private static final int HAL_DATASPACE_DEPTH = 0x1000; 1648 1649 private static final long DURATION_20FPS_NS = 50000000L; 1650 /** 1651 * @see #getDurations(int, int) 1652 */ 1653 private static final int DURATION_MIN_FRAME = 0; 1654 private static final int DURATION_STALL = 1; 1655 1656 private final 
StreamConfiguration[] mConfigurations; 1657 private final StreamConfigurationDuration[] mMinFrameDurations; 1658 private final StreamConfigurationDuration[] mStallDurations; 1659 1660 private final StreamConfiguration[] mDepthConfigurations; 1661 private final StreamConfigurationDuration[] mDepthMinFrameDurations; 1662 private final StreamConfigurationDuration[] mDepthStallDurations; 1663 1664 private final HighSpeedVideoConfiguration[] mHighSpeedVideoConfigurations; 1665 private final ReprocessFormatsMap mInputOutputFormatsMap; 1666 1667 private final boolean mListHighResolution; 1668 1669 /** internal format -> num output sizes mapping, not including slow high-res sizes, for 1670 * non-depth dataspaces */ 1671 private final SparseIntArray mOutputFormats = new SparseIntArray(); 1672 /** internal format -> num output sizes mapping for slow high-res sizes, for non-depth 1673 * dataspaces */ 1674 private final SparseIntArray mHighResOutputFormats = new SparseIntArray(); 1675 /** internal format -> num output sizes mapping for all non-depth dataspaces */ 1676 private final SparseIntArray mAllOutputFormats = new SparseIntArray(); 1677 /** internal format -> num input sizes mapping, for input reprocessing formats */ 1678 private final SparseIntArray mInputFormats = new SparseIntArray(); 1679 /** internal format -> num depth output sizes mapping, for HAL_DATASPACE_DEPTH */ 1680 private final SparseIntArray mDepthOutputFormats = new SparseIntArray(); 1681 /** High speed video Size -> FPS range count mapping*/ 1682 private final HashMap</*HighSpeedVideoSize*/Size, /*Count*/Integer> mHighSpeedVideoSizeMap = 1683 new HashMap<Size, Integer>(); 1684 /** High speed video FPS range -> Size count mapping*/ 1685 private final HashMap</*HighSpeedVideoFpsRange*/Range<Integer>, /*Count*/Integer> 1686 mHighSpeedVideoFpsRangeMap = new HashMap<Range<Integer>, Integer>(); 1687 1688 } 1689