• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2013 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14"""Utility functions to form an ItsSession and perform various camera actions.
15"""
16
17
18import collections
19import fnmatch
20import glob
21import json
22import logging
23import math
24import os
25import socket
26import subprocess
27import sys
28import time
29import types
30import unicodedata
31
32from mobly.controllers.android_device_lib import adb
33import numpy
34
35import camera_properties_utils
36import capture_request_utils
37import error_util
38import image_processing_utils
39import its_device_utils
40import opencv_processing_utils
41import ui_interaction_utils
42
# Android SDK/API levels used for version-gated behavior.
ANDROID13_API_LEVEL = 33
ANDROID14_API_LEVEL = 34
ANDROID15_API_LEVEL = 35
ANDROID16_API_LEVEL = 36
# Logical camera lens-type names.
CAMERA_TYPE_TELE = 'telephoto'
CAMERA_TYPE_ULTRAWIDE = 'ultrawide'
CAMERA_TYPE_WIDE = 'wide'
CHART_DISTANCE_NO_SCALING = 0
# android.graphics.ImageFormat constant values.
IMAGE_FORMAT_JPEG = 256
IMAGE_FORMAT_YUV_420_888 = 35
# Tags identifying JCA capture results.
JCA_VIDEO_PATH_TAG = 'JCA_VIDEO_CAPTURE_PATH'
JCA_CAPTURE_PATHS_TAG = 'JCA_CAPTURE_PATHS'
JCA_CAPTURE_STATUS_TAG = 'JCA_CAPTURE_STATUS'
LOAD_SCENE_DELAY_SEC = 3
PREVIEW_MAX_TESTED_AREA = 1920 * 1440
PREVIEW_MIN_TESTED_AREA = 320 * 240
PRIVATE_FORMAT = 'priv'
JPEG_R_FMT_STR = 'jpeg_r'
SCALING_TO_FILE_ATOL = 0.01
SINGLE_CAPTURE_NCAP = 1
# Separator between logical and physical camera ids (see get_camera_name).
SUB_CAMERA_SEPARATOR = '.'
# pylint: disable=line-too-long
# Allowed tablets as listed on https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-requirements
# List entries must be entered in lowercase
TABLET_ALLOWLIST = (
    'dragon',  # Google Pixel C
    'hnhey-q',  # Honor Pad 8
    'hwcmr09',  # Huawei MediaPad M5
    'x306f',  # Lenovo Tab M10 HD (Gen 2)
    'x606f',  # Lenovo Tab M10 Plus
    'j606f',  # Lenovo Tab P11
    'tb350fu',  # Lenovo Tab P11 (Gen 2)
    'agta',  # Nokia T21
    'gta4lwifi',  # Samsung Galaxy Tab A7
    'gta8wifi',  # Samsung Galaxy Tab A8
    'gta8',  # Samsung Galaxy Tab A8 LTE
    'gta9pwifi',  # Samsung Galaxy Tab A9+
    'gta9p',  # Samsung Galaxy Tab A9+ 5G
    'dpd2221',  # Vivo Pad2
    'nabu',  # Xiaomi Pad 5
    'nabu_tw',  # Xiaomi Pad 5
    'xun',  # Xiaomi Redmi Pad SE
    'yunluo',  # Xiaomi Redmi Pad
)
TABLET_DEFAULT_BRIGHTNESS = 192  # 8-bit tablet 75% brightness
TABLET_LEGACY_BRIGHTNESS = 96
TABLET_LEGACY_NAME = 'dragon'
# Minimum required Android API level per tablet model.
# List entries must be entered in lowercase
TABLET_OS_VERSION = types.MappingProxyType({
    'nabu': ANDROID13_API_LEVEL,
    'nabu_tw': ANDROID13_API_LEVEL,
    'yunluo': ANDROID14_API_LEVEL
    })
TABLET_REQUIREMENTS_URL = 'https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-allowlist'
TABLET_BRIGHTNESS_ERROR_MSG = ('Tablet brightness not set as per '
                               f'{TABLET_REQUIREMENTS_URL} in the config file')
TABLET_NOT_ALLOWED_ERROR_MSG = ('Tablet model or tablet Android version is '
                                'not on our allowlist, please refer to '
                                f'{TABLET_REQUIREMENTS_URL}')
TAP_COORDINATES = (500, 500)  # Location to tap tablet screen via adb
USE_CASE_CROPPED_RAW = 6
VIDEO_SCENES = ('scene_video',)
NOT_YET_MANDATED_MESSAGE = 'Not yet mandated test'
RESULT_OK_STATUS = '-1'

_FLASH_MODE_OFF = 0
# Corner-patch dimensions (as fractions of image size) for lighting checks.
_VALIDATE_LIGHTING_PATCH_H = 0.05
_VALIDATE_LIGHTING_PATCH_W = 0.05
# Top-left corner coordinates (y, x fractions) for each corner patch.
_VALIDATE_LIGHTING_REGIONS = {
    'top-left': (0, 0),
    'top-right': (0, 1-_VALIDATE_LIGHTING_PATCH_H),
    'bottom-left': (1-_VALIDATE_LIGHTING_PATCH_W, 0),
    'bottom-right': (1-_VALIDATE_LIGHTING_PATCH_W,
                     1-_VALIDATE_LIGHTING_PATCH_H),
}
_MODULAR_MACRO_OFFSET = 0.35  # Determined empirically from modular rig testing
# Inset patch coordinates used on the modular rig for ultrawide cameras.
_VALIDATE_LIGHTING_REGIONS_MODULAR_UW = {
    'top-left': (_MODULAR_MACRO_OFFSET, _MODULAR_MACRO_OFFSET),
    'bottom-left': (_MODULAR_MACRO_OFFSET,
                    1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_H),
    'top-right': (1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_W,
                  _MODULAR_MACRO_OFFSET),
    'bottom-right': (1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_W,
                     1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_H),
}
_VALIDATE_LIGHTING_MACRO_FOV_THRESH = 110
_VALIDATE_LIGHTING_THRESH = 0.05  # Determined empirically from scene[1:6] tests
_VALIDATE_LIGHTING_THRESH_DARK = 0.3  # Determined empirically for night test
# JSON protocol keys shared with ItsService command/response payloads.
_CMD_NAME_STR = 'cmdName'
_OBJ_VALUE_STR = 'objValue'
_STR_VALUE_STR = 'strValue'
_TAG_STR = 'tag'
_CAMERA_ID_STR = 'cameraId'
_EXTRA_TIMEOUT_FACTOR = 20
_COPY_SCENE_DELAY_SEC = 1
_DST_SCENE_DIR = '/sdcard/Download/'
_BIT_HLG10 = 0x01  # bit 1 for feature mask
_BIT_STABILIZATION = 0x02  # bit 2 for feature mask
_CAMERA_RESTART_DELAY_SEC = 10
142
143
def validate_tablet(tablet_name, brightness, device_id):
  """Ensures tablet model, OS version and brightness meet ITS requirements.

  https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-allowlist

  Args:
    tablet_name: tablet product name specified by `ro.product.device`.
    brightness: brightness specified by config file.
    device_id: str; ID of the device.

  Raises:
    AssertionError: tablet is not allowlisted, runs an Android version older
      than required for its model, or brightness is misconfigured.
  """
  tablet_name = tablet_name.lower()
  # Reject tablets that are not on the published allowlist.
  if tablet_name not in TABLET_ALLOWLIST:
    raise AssertionError(
        f'Tablet product name: {tablet_name}. {TABLET_NOT_ALLOWED_ERROR_MSG}'
    )
  # Some models additionally require a minimum Android version.
  min_sdk = TABLET_OS_VERSION.get(tablet_name)
  if min_sdk is not None:
    device_sdk = get_build_sdk_version(device_id)
    if device_sdk < min_sdk:
      raise AssertionError(
          f' Tablet product name: {tablet_name}. '
          f'Android version: {device_sdk}. {TABLET_NOT_ALLOWED_ERROR_MSG}'
      )
  # Legacy model uses a lower brightness; everything else uses the default.
  if tablet_name == TABLET_LEGACY_NAME:
    expected_brightness = TABLET_LEGACY_BRIGHTNESS
  else:
    expected_brightness = TABLET_DEFAULT_BRIGHTNESS
  if brightness != expected_brightness:
    raise AssertionError(TABLET_BRIGHTNESS_ERROR_MSG)
174
175
def check_apk_installed(device_id, package_name):
  """Verifies that an APK is installed on a given device.

  Lists all installed packages via adb and checks the expected package name
  appears in the output. Listing without a shell-side `grep` guarantees the
  documented AssertionError is raised when the package is missing (a failing
  `grep` would otherwise surface as CalledProcessError), and passing the
  command as an argument list avoids shell-string injection.

  Args:
    device_id: str; ID of the device.
    package_name: str; name of the package that should be installed.

  Raises:
    AssertionError: package_name is not present in the device's package list.
    subprocess.CalledProcessError: the adb command itself fails.
  """
  list_packages_cmd = [
      'adb', '-s', device_id, 'shell', 'pm', 'list', 'packages'
  ]
  bytes_output = subprocess.check_output(
      list_packages_cmd, stderr=subprocess.STDOUT
  )
  output = bytes_output.decode('utf-8').strip()
  # Substring check mirrors the original `grep {package_name}` semantics.
  if package_name not in output:
    raise AssertionError(
        f'{package_name} not installed on device {device_id}!'
    )
195
196
def get_array_size(buffer):
  """Get the total number of elements in a NumPy array.

  `numpy.prod` is available in every supported NumPy release, while
  `numpy.product` was a deprecated alias removed in NumPy 2.0 — so the
  previous version-string sniffing was unnecessary and fragile.

  Args:
    buffer: A NumPy array.

  Returns:
    buffer_size: The size (element count) of the buffer; 1 for 0-d arrays.
  """
  return numpy.prod(buffer.shape)
212
213
class ItsSession(object):
  """Controls a device over adb to run ITS scripts.

    The script importing this module (on the host machine) prepares JSON
    objects encoding CaptureRequests, specifying sets of parameters to use
    when capturing an image using the Camera2 APIs. This class encapsulates
    sending the requests to the device, monitoring the device's progress, and
    copying the resultant captures back to the host machine when done. TCP
    forwarded over adb is the transport mechanism used.

    The device must have CtsVerifier.apk installed.

    Attributes:
        sock: The open socket.
  """

  # Open a connection to localhost:<host_port>, forwarded to port 6000 on the
  # device. <host_port> is determined at run-time to support multiple
  # connected devices.
  IPADDR = '127.0.0.1'
  REMOTE_PORT = 6000
  BUFFER_SIZE = 4096

  # LOCK_PORT is used as a mutex lock to protect the list of forwarded ports
  # among all processes. The script assumes LOCK_PORT is available and will
  # try to use ports between CLIENT_PORT_START and
  # CLIENT_PORT_START+MAX_NUM_PORTS-1 on host for ITS sessions.
  CLIENT_PORT_START = 6000
  MAX_NUM_PORTS = 100
  LOCK_PORT = CLIENT_PORT_START + MAX_NUM_PORTS

  # Seconds timeout on each socket operation.
  SOCK_TIMEOUT = 20.0
  # Seconds timeout on performance measurement socket operation
  SOCK_TIMEOUT_FOR_PERF_MEASURE = 40.0
  # Seconds timeout on preview recording socket operation.
  SOCK_TIMEOUT_PREVIEW = 30.0  # test_imu_drift is 30s

  # Additional timeout in seconds when ITS service is doing more complicated
  # operations, for example: issuing warmup requests before actual capture.
  EXTRA_SOCK_TIMEOUT = 5.0

  PACKAGE = 'com.android.cts.verifier.camera.its'
  INTENT_START = 'com.android.cts.verifier.camera.its.START'

  # This string must be in sync with ItsService. Updated when interface
  # between script and ItsService is changed.
  ITS_SERVICE_VERSION = '1.0'

  SEC_TO_NSEC = 1000*1000*1000.0
  # Default adb prefix; replaced per-instance in __init__ with
  # 'adb -s <device_id>'.
  adb = 'adb -d'

  # Predefine camera props. Save props extracted from the function,
  # "get_camera_properties".
  props = None

  # Per-format response keys; list 1 includes dng/y8 variants, list 2 the
  # yuv variant.
  IMAGE_FORMAT_LIST_1 = [
      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
      'dngImage', 'y8Image', 'jpeg_rImage', 'heic_ultrahdrImage',
      'rawQuadBayerImage', 'rawQuadBayerStatsImage',
      'raw10StatsImage', 'raw10QuadBayerStatsImage', 'raw10QuadBayerImage'
  ]

  IMAGE_FORMAT_LIST_2 = [
      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
      'yuvImage', 'jpeg_rImage', 'heic_ultrahdrImage',
      'rawQuadBayerImage', 'rawQuadBayerStatsImage',
      'raw10StatsImage', 'raw10QuadBayerStatsImage', 'raw10QuadBayerImage'
  ]

  # Canned output-surface configurations for common capture cases.
  CAP_JPEG = {'format': 'jpeg'}
  CAP_RAW = {'format': 'raw'}
  CAP_CROPPED_RAW = {'format': 'raw', 'useCase': USE_CASE_CROPPED_RAW}
  CAP_YUV = {'format': 'yuv'}
  CAP_RAW_YUV = [{'format': 'raw'}, {'format': 'yuv'}]
289
  def __init_socket_port(self):
    """Initialize the socket port for the host to forward requests to the device.

    This method assumes localhost's LOCK_PORT is available and will try to
    use ports between CLIENT_PORT_START and CLIENT_PORT_START+MAX_NUM_PORTS-1
    """
    num_retries = 100
    retry_wait_time_sec = 0.05

    # Bind a socket to use as mutex lock: while this process holds LOCK_PORT
    # bound, no other ITS process can assign forwarded ports concurrently.
    socket_lock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    for i in range(num_retries):
      try:
        socket_lock.bind((ItsSession.IPADDR, ItsSession.LOCK_PORT))
        break
      except (socket.error, socket.timeout) as socket_issue:
        # Another process holds the lock; retry briefly, then give up.
        if i == num_retries - 1:
          raise error_util.CameraItsError(
              self._device_id, 'socket lock returns error') from socket_issue
        else:
          time.sleep(retry_wait_time_sec)

    # Check if a port is already assigned to the device.
    command = 'adb forward --list'
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    # pylint: disable=unused-variable
    output, error = proc.communicate()
    port = None
    used_ports = []
    for line  in output.decode('utf-8').split(os.linesep):
      # each line should be formatted as:
      # "<device_id> tcp:<host_port> tcp:<remote_port>"
      forward_info = line.split()
      if len(forward_info) >= 3 and len(
          forward_info[1]) > 4 and forward_info[1][:4] == 'tcp:' and len(
              forward_info[2]) > 4 and forward_info[2][:4] == 'tcp:':
        local_p = int(forward_info[1][4:])
        remote_p = int(forward_info[2][4:])
        # Reuse an existing forward for this device; remember all other
        # locally used ports so they are skipped below.
        if forward_info[
            0] == self._device_id and remote_p == ItsSession.REMOTE_PORT:
          port = local_p
          break
        else:
          used_ports.append(local_p)

      # Find the first available port if no port is assigned to the device.
    if port is None:
      for p in range(ItsSession.CLIENT_PORT_START,
                     ItsSession.CLIENT_PORT_START + ItsSession.MAX_NUM_PORTS):
        if self.check_port_availability(p, used_ports):
          port = p
          break

    if port is None:
      raise error_util.CameraItsError(self._device_id,
                                      ' cannot find an available ' + 'port')

    # Remember the chosen port for later reconnects
    # (see reset_socket_and_camera).
    self._sock_port = port

    # Release the socket as mutex unlock
    socket_lock.close()

    # Connect to the socket
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.connect((self.IPADDR, port))
    self.sock.settimeout(self.SOCK_TIMEOUT)
356
357  def check_port_availability(self, check_port, used_ports):
358    """Check if the port is available or not.
359
360    Args:
361      check_port: Port to check for availability
362      used_ports: List of used ports
363
364    Returns:
365     True if the given port is available and can be assigned to the device.
366    """
367    if check_port not in used_ports:
368      # Try to run "adb forward" with the port
369      command = ('%s forward tcp:%d tcp:%d' %
370                 (self.adb, check_port, self.REMOTE_PORT))
371      proc = subprocess.Popen(
372          command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
373      error = proc.communicate()[1]
374
375      # Check if there is no error
376      if error is None or error.find('error'.encode()) < 0:
377        return True
378      else:
379        return False
380
  def __wait_for_service(self):
    """Wait for ItsService to be ready and reboot the device if needed.

    This also includes the optional reboot handling: if the user
    provides a "reboot" or "reboot=N" arg, then reboot the device,
    waiting for N seconds (default 30) before returning.
    """

    # Optional reboot: triggered by a 'reboot' or 'reboot=N' argv flag.
    for s in sys.argv[1:]:
      if s[:6] == 'reboot':
        duration = 30
        # 'reboot=N' form: parse the wait duration after '='.
        if len(s) > 7 and s[6] == '=':
          duration = int(s[7:])
        logging.debug('Rebooting device')
        its_device_utils.run(f'{self.adb} reboot')
        its_device_utils.run(f'{self.adb} wait-for-device')
        time.sleep(duration)
        logging.debug('Reboot complete')

    # Flush logcat so following code won't be misled by previous
    # 'ItsService ready' log.
    its_device_utils.run(f'{self.adb} logcat -c')
    time.sleep(1)

    # Force-stop then restart the ITS service so it emits a fresh ready log.
    its_device_utils.run(
        f'{self.adb} shell am force-stop --user cur {self.PACKAGE}')
    its_device_utils.run(
        f'{self.adb} shell am start-foreground-service --user cur '
        f'-t text/plain -a {self.INTENT_START}'
    )

    # Wait until the socket is ready to accept a connection.
    proc = subprocess.Popen(
        self.adb.split() + ['logcat'], stdout=subprocess.PIPE)
    logcat = proc.stdout
    # Block until the service announces readiness in logcat.
    while True:
      line = logcat.readline().strip()
      if line.find(b'ItsService ready') >= 0:
        break
    proc.kill()
    proc.communicate()
422
  def __init__(self, device_id=None, camera_id=None, hidden_physical_id=None,
               override_to_portrait=None):
    """Create a session: wait for ItsService and connect the command socket.

    Args:
      device_id: str; serial of the device under test. NOTE(review): despite
        the None default, a real id appears to be required — the adb command
        string below concatenates it directly; confirm callers always pass it.
      camera_id: str; camera to open (resolved from argv in __open_camera
        when omitted).
      hidden_physical_id: str; physical sub-camera id behind a logical
        camera, if testing one.
      override_to_portrait: optional value sent as 'overrideToPortrait' when
        opening the camera.
    """
    self._camera_id = camera_id
    self._device_id = device_id
    self._hidden_physical_id = hidden_physical_id
    self._override_to_portrait = override_to_portrait

    # Initialize device id and adb command.
    self.adb = 'adb -s ' + self._device_id
    self.__wait_for_service()
    self.__init_socket_port()
434
  def __enter__(self):
    """Context entry: open the camera and return the session."""
    # Close first so the open below starts from a known-closed state.
    self.close_camera()
    self.__open_camera()
    return self
439
440  def __exit__(self, exec_type, exec_value, exec_traceback):
441    if hasattr(self, 'sock') and self.sock:
442      self.close_camera()
443      self.sock.close()
444    return False
445
  def reset_socket_and_camera(self):
    """Reset by reconnecting socket and opening camera.

    Recovery path after a device-side crash: drop the stale socket,
    reconnect to the previously forwarded port, and reopen the camera.

    Returns: None
    """
    if hasattr(self, 'sock') and self.sock:
      self.sock.close()

    # Give more time for camera device to enumerate and initialize
    # after crash.
    time.sleep(_CAMERA_RESTART_DELAY_SEC)

    # Reconnect to the socket (same port chosen by __init_socket_port).
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.connect((self.IPADDR, self._sock_port))
    self.sock.settimeout(self.SOCK_TIMEOUT)

    # Reopen camera
    self.__open_camera()
465
466  def override_with_hidden_physical_camera_props(self, props):
467    """Check that it is a valid sub-camera backing the logical camera.
468
469    If current session is for a hidden physical camera, check that it is a valid
470    sub-camera backing the logical camera, override self.props, and return the
471    characteristics of sub-camera. Otherwise, return "props" directly.
472
473    Args:
474     props: Camera properties object.
475
476    Returns:
477     The properties of the hidden physical camera if possible.
478    """
479    if self._hidden_physical_id:
480      if not camera_properties_utils.logical_multi_camera(props):
481        logging.debug('cam %s not a logical multi-camera: no change in props.',
482                      self._hidden_physical_id)
483        return props
484      physical_ids = camera_properties_utils.logical_multi_camera_physical_ids(
485          props)
486      if self._hidden_physical_id not in physical_ids:
487        raise AssertionError(f'{self._hidden_physical_id} is not a hidden '
488                             f'sub-camera of {self._camera_id}')
489      logging.debug('Overriding cam %s props', self._hidden_physical_id)
490      props = self.get_camera_properties_by_id(self._hidden_physical_id)
491      self.props = props
492    return props
493
494  def get_camera_properties(self):
495    """Get the camera properties object for the device.
496
497    Returns:
498     The Python dictionary object for the CameraProperties object.
499    """
500    cmd = {}
501    cmd[_CMD_NAME_STR] = 'getCameraProperties'
502    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
503    data, _ = self.__read_response_from_socket()
504    if data[_TAG_STR] != 'cameraProperties':
505      raise error_util.CameraItsError('Invalid command response')
506    self.props = data[_OBJ_VALUE_STR]['cameraProperties']
507    return data[_OBJ_VALUE_STR]['cameraProperties']
508
509  def get_session_properties(self, out_surfaces, cap_request):
510    """Get the camera properties object for a session configuration.
511
512    Args:
513      out_surfaces: output surfaces used to query session props.
514      cap_request: capture request used to query session props.
515
516    Returns:
517     The Python dictionary object for the CameraProperties object.
518    """
519    cmd = {}
520    cmd[_CMD_NAME_STR] = 'getCameraSessionProperties'
521    if out_surfaces:
522      if isinstance(out_surfaces, list):
523        cmd['outputSurfaces'] = out_surfaces
524      else:
525        cmd['outputSurfaces'] = [out_surfaces]
526      formats = [
527          c['format'] if 'format' in c else 'yuv' for c in cmd['outputSurfaces']
528      ]
529      formats = [s if s != 'jpg' else 'jpeg' for s in formats]
530    else:
531      max_yuv_size = capture_request_utils.get_available_output_sizes(
532          'yuv', self.props)[0]
533      formats = ['yuv']
534      cmd['outputSurfaces'] = [{
535          'format': 'yuv',
536          'width': max_yuv_size[0],
537          'height': max_yuv_size[1]
538      }]
539    cmd['captureRequest'] = cap_request
540
541    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
542    data, _ = self.__read_response_from_socket()
543    if data[_TAG_STR] != 'cameraProperties':
544      raise error_util.CameraItsError('Invalid command response')
545    self.props = data[_OBJ_VALUE_STR]['cameraProperties']
546    return data[_OBJ_VALUE_STR]['cameraProperties']
547
548  def get_camera_properties_by_id(self, camera_id, override_to_portrait=None):
549    """Get the camera properties object for device with camera_id.
550
551    Args:
552     camera_id: The ID string of the camera
553     override_to_portrait: Optional value for overrideToPortrait
554
555    Returns:
556     The Python dictionary object for the CameraProperties object. Empty
557     if no such device exists.
558    """
559    cmd = {}
560    cmd[_CMD_NAME_STR] = 'getCameraPropertiesById'
561    cmd[_CAMERA_ID_STR] = camera_id
562    if override_to_portrait is not None:
563      cmd['overrideToPortrait'] = override_to_portrait
564    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
565    data, _ = self.__read_response_from_socket()
566    if data[_TAG_STR] != 'cameraProperties':
567      raise error_util.CameraItsError('Invalid command response')
568    return data[_OBJ_VALUE_STR]['cameraProperties']
569
  def __read_response_from_socket(self):
    """Reads a line (newline-terminated) string serialization of JSON object.

    Returns:
     Deserialized json obj.
    """
    # Read one byte at a time up to the newline, so no bytes belonging to a
    # binary payload that may follow the JSON line are consumed.
    chars = []
    while not chars or chars[-1] != '\n':
      ch = self.sock.recv(1).decode('utf-8')
      if not ch:
        # Socket was probably closed; otherwise don't get empty strings
        raise error_util.CameraItsError('Problem with socket on device side')
      chars.append(ch)
    line = ''.join(chars)
    jobj = json.loads(line)
    # Optionally read a binary buffer of a fixed size.
    buf = None
    if 'bufValueSize' in jobj:
      n = jobj['bufValueSize']
      buf = bytearray(n)
      view = memoryview(buf)
      # recv_into may deliver fewer bytes than requested; loop until the
      # whole payload has arrived.
      # NOTE(review): if the peer closes mid-payload, recv_into returns 0
      # and this loop would spin forever — confirm the socket timeout and
      # service behavior make that impossible in practice.
      while n > 0:
        nbytes = self.sock.recv_into(view, n)
        view = view[nbytes:]
        n -= nbytes
      buf = numpy.frombuffer(buf, dtype=numpy.uint8)
    return jobj, buf
597
598  def __open_camera(self):
599    """Get the camera ID to open if it is an argument as a single camera.
600
601    This allows passing camera=# to individual tests at command line
602    and camera=#,#,# or an no camera argv with tools/run_all_tests.py.
603    In case the camera is a logical multi-camera, to run ITS on the
604    hidden physical sub-camera, pass camera=[logical ID]:[physical ID]
605    to an individual test at the command line, and same applies to multiple
606    camera IDs for tools/run_all_tests.py: camera=#,#:#,#:#,#
607    """
608    if not self._camera_id:
609      self._camera_id = 0
610      for s in sys.argv[1:]:
611        if s[:7] == 'camera=' and len(s) > 7:
612          camera_ids = s[7:].split(',')
613          camera_id_combos = parse_camera_ids(camera_ids)
614          if len(camera_id_combos) == 1:
615            self._camera_id = camera_id_combos[0].id
616            self._hidden_physical_id = camera_id_combos[0].sub_id
617
618    logging.debug('Opening camera: %s', self._camera_id)
619    cmd = {_CMD_NAME_STR: 'open', _CAMERA_ID_STR: self._camera_id}
620    if self._override_to_portrait is not None:
621      cmd['overrideToPortrait'] = self._override_to_portrait
622    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
623    data, _ = self.__read_response_from_socket()
624    if data[_TAG_STR] != 'cameraOpened':
625      raise error_util.CameraItsError('Invalid command response')
626
627  def close_camera(self):
628    cmd = {_CMD_NAME_STR: 'close'}
629    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
630    data, _ = self.__read_response_from_socket()
631    if data[_TAG_STR] != 'cameraClosed':
632      raise error_util.CameraItsError('Invalid command response')
633
634  def zoom_ratio_within_range(self, zoom_ratio):
635    """Determine if a given zoom ratio is within device zoom range.
636
637    Args:
638      zoom_ratio: float; zoom ratio requested
639    Returns:
640      Boolean: True, if zoom_ratio inside device range. False otherwise.
641    """
642    zoom_range = self.props['android.control.zoomRatioRange']
643    return zoom_ratio >= zoom_range[0] and zoom_ratio <= zoom_range[1]
644
645  def get_sensors(self):
646    """Get all sensors on the device.
647
648    Returns:
649       A Python dictionary that returns keys and booleans for each sensor.
650    """
651    cmd = {}
652    cmd[_CMD_NAME_STR] = 'checkSensorExistence'
653    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
654    data, _ = self.__read_response_from_socket()
655    if data[_TAG_STR] != 'sensorExistence':
656      raise error_util.CameraItsError('Invalid response for command: %s' %
657                                      cmd[_CMD_NAME_STR])
658    return data[_OBJ_VALUE_STR]
659
660  def get_default_camera_pkg(self):
661    """Get default camera app package name.
662
663    Returns:
664       Default camera app pkg name.
665    """
666    cmd = {}
667    cmd[_CMD_NAME_STR] = 'doGetDefaultCameraPkgName'
668    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
669    data, _ = self.__read_response_from_socket()
670    if data[_TAG_STR] != 'defaultCameraPkg':
671      raise error_util.CameraItsError('Invalid response for command: %s' %
672                                      cmd[_CMD_NAME_STR])
673    return data['strValue']
674
  def check_gain_map_present(self, file_path):
    """Check if the image has gainmap present or not.

    The image stored at file_path is decoded and analyzed
    to check whether the gainmap is present or not. If the image
    captured is UltraHDR, it should have gainmap present.

    Args:
      file_path: path of the image to be analyzed on DUT.
    Returns:
      Boolean: True if the image has gainmap present.
    """
    cmd = {}
    cmd[_CMD_NAME_STR] = 'doGainMapCheck'
    cmd['filePath'] = file_path
    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
    data, _ = self.__read_response_from_socket()
    if data[_TAG_STR] != 'gainmapPresent':
      raise error_util.CameraItsError(
          'Invalid response for command: %s' % cmd[_CMD_NAME_STR])
    # NOTE(review): this returns the service's raw strValue, not a bool as
    # the docstring says. Sibling queries (e.g. is_p3_capture_supported)
    # compare strValue == 'true'; if the service ever replies 'false' here,
    # the string is still truthy to callers. Confirm the service's reply
    # format before relying on truthiness.
    return data['strValue']
696
697  def start_sensor_events(self):
698    """Start collecting sensor events on the device.
699
700    See get_sensor_events for more info.
701
702    Returns:
703       Nothing.
704    """
705    cmd = {}
706    cmd[_CMD_NAME_STR] = 'startSensorEvents'
707    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
708    data, _ = self.__read_response_from_socket()
709    if data[_TAG_STR] != 'sensorEventsStarted':
710      raise error_util.CameraItsError('Invalid response for command: %s' %
711                                      cmd[_CMD_NAME_STR])
712
713  def get_sensor_events(self):
714    """Get a trace of all sensor events on the device.
715
716        The trace starts when the start_sensor_events function is called. If
717        the test runs for a long time after this call, then the device's
718        internal memory can fill up. Calling get_sensor_events gets all events
719        from the device, and then stops the device from collecting events and
720        clears the internal buffer; to start again, the start_sensor_events
721        call must be used again.
722
723        Events from the accelerometer, compass, and gyro are returned; each
724        has a timestamp and x,y,z values.
725
726        Note that sensor events are only produced if the device isn't in its
727        standby mode (i.e.) if the screen is on.
728
729    Returns:
730            A Python dictionary with three keys ("accel", "mag", "gyro") each
731            of which maps to a list of objects containing "time","x","y","z"
732            keys.
733    """
734    cmd = {}
735    cmd[_CMD_NAME_STR] = 'getSensorEvents'
736    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
737    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
738    self.sock.settimeout(timeout)
739    data, _ = self.__read_response_from_socket()
740    if data[_TAG_STR] != 'sensorEvents':
741      raise error_util.CameraItsError('Invalid response for command: %s ' %
742                                      cmd[_CMD_NAME_STR])
743    self.sock.settimeout(self.SOCK_TIMEOUT)
744    return data[_OBJ_VALUE_STR]
745
746  def get_camera_ids(self):
747    """Returns the list of all camera_ids.
748
749    Returns:
750      List of camera ids on the device.
751    """
752    cmd = {'cmdName': 'getCameraIds'}
753    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
754    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
755    self.sock.settimeout(timeout)
756    data, _ = self.__read_response_from_socket()
757    if data['tag'] != 'cameraIds':
758      raise error_util.CameraItsError('Invalid command response')
759    return data['objValue']
760
761  def get_camera_name(self):
762    """Gets the camera name.
763
764    Returns:
765      The camera name with camera id and/or hidden physical id.
766    """
767    if self._hidden_physical_id:
768      return f'{self._camera_id}.{self._hidden_physical_id}'
769    else:
770      return self._camera_id
771
772  def get_unavailable_physical_cameras(self, camera_id):
773    """Get the unavailable physical cameras ids.
774
775    Args:
776      camera_id: int; device id
777    Returns:
778      List of all physical camera ids which are unavailable.
779    """
780    cmd = {_CMD_NAME_STR: 'doGetUnavailablePhysicalCameras',
781           _CAMERA_ID_STR: camera_id}
782    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
783    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
784    self.sock.settimeout(timeout)
785    data, _ = self.__read_response_from_socket()
786    if data[_TAG_STR] != 'unavailablePhysicalCameras':
787      raise error_util.CameraItsError('Invalid command response')
788    return data[_OBJ_VALUE_STR]
789
790  def is_hlg10_recording_supported_for_profile(self, profile_id):
791    """Query whether the camera device supports HLG10 video recording.
792
793    Args:
794      profile_id: int; profile id corresponding to the quality level.
795    Returns:
796      Boolean: True if device supports HLG10 video recording, False in
797      all other cases.
798    """
799    cmd = {}
800    cmd[_CMD_NAME_STR] = 'isHLG10SupportedForProfile'
801    cmd[_CAMERA_ID_STR] = self._camera_id
802    cmd['profileId'] = profile_id
803    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
804
805    data, _ = self.__read_response_from_socket()
806    if data[_TAG_STR] != 'hlg10Response':
807      raise error_util.CameraItsError('Failed to query HLG10 support')
808    return data[_STR_VALUE_STR] == 'true'
809
810  def is_hlg10_recording_supported_for_size_and_fps(
811      self, video_size, max_fps):
812    """Query whether the camera device supports HLG10 video recording.
813
814    Args:
815      video_size: String; the hlg10 video recording size.
816      max_fps: int; the maximum frame rate of the camera.
817    Returns:
818      Boolean: True if device supports HLG10 video recording, False in
819      all other cases.
820    """
821    cmd = {}
822    cmd[_CMD_NAME_STR] = 'isHLG10SupportedForSizeAndFps'
823    cmd[_CAMERA_ID_STR] = self._camera_id
824    cmd['videoSize'] = video_size
825    cmd['maxFps'] = max_fps
826    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
827
828    data, _ = self.__read_response_from_socket()
829    if data[_TAG_STR] != 'hlg10Response':
830      raise error_util.CameraItsError('Failed to query HLG10 support')
831    return data[_STR_VALUE_STR] == 'true'
832
833  def is_p3_capture_supported(self):
834    """Query whether the camera device supports P3 image capture.
835
836    Returns:
837      Boolean: True, if device supports P3 image capture, False in
838      all other cases.
839    """
840    cmd = {}
841    cmd[_CMD_NAME_STR] = 'isP3Supported'
842    cmd[_CAMERA_ID_STR] = self._camera_id
843    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
844
845    data, _ = self.__read_response_from_socket()
846    if data[_TAG_STR] != 'p3Response':
847      raise error_util.CameraItsError('Failed to query P3 support')
848    return data[_STR_VALUE_STR] == 'true'
849
850  def is_landscape_to_portrait_enabled(self):
851    """Query whether the device has enabled the landscape to portrait property.
852
853    Returns:
854      Boolean: True, if the device has the system property enabled. False
855      otherwise.
856    """
857    cmd = {}
858    cmd[_CMD_NAME_STR] = 'isLandscapeToPortraitEnabled'
859    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
860
861    data, _ = self.__read_response_from_socket()
862    if data[_TAG_STR] != 'landscapeToPortraitEnabledResponse':
863      raise error_util.CameraItsError(
864          'Failed to query landscape to portrait system property')
865    return data[_STR_VALUE_STR] == 'true'
866
867  def get_supported_video_sizes_capped(self, camera_id):
868    """Get the supported video sizes for camera id.
869
870    Args:
871      camera_id: int; device id
872    Returns:
873      Sorted list of supported video sizes.
874    """
875
876    cmd = {
877        _CMD_NAME_STR: 'doGetSupportedVideoSizesCapped',
878        _CAMERA_ID_STR: camera_id,
879    }
880    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
881    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
882    self.sock.settimeout(timeout)
883    data, _ = self.__read_response_from_socket()
884    if data[_TAG_STR] != 'supportedVideoSizes':
885      raise error_util.CameraItsError('Invalid command response')
886    if not data[_STR_VALUE_STR]:
887      raise error_util.CameraItsError('No supported video sizes')
888    return data[_STR_VALUE_STR].split(';')
889
890  def do_basic_recording(self, profile_id, quality, duration,
891                         video_stabilization_mode=0, hlg10_enabled=False,
892                         zoom_ratio=None, ae_target_fps_min=None,
893                         ae_target_fps_max=None, antibanding_mode=None,
894                         face_detect_mode=None):
895    """Issue a recording request and read back the video recording object.
896
897    The recording will be done with the format specified in quality. These
898    quality levels correspond to the profiles listed in CamcorderProfile.
899    The duration is the time in seconds for which the video will be recorded.
900    The recorded object consists of a path on the device at which the
901    recorded video is saved.
902
903    Args:
904      profile_id: int; profile id corresponding to the quality level.
905      quality: Video recording quality such as High, Low, VGA.
906      duration: The time in seconds for which the video will be recorded.
907      video_stabilization_mode: Video stabilization mode ON/OFF. Value can be
908      0: 'OFF', 1: 'ON', 2: 'PREVIEW'
909      hlg10_enabled: boolean: True Enable 10-bit HLG video recording, False
910      record using the regular SDR profile
911      zoom_ratio: float; zoom ratio. None if default zoom
912      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
913      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
914      antibanding_mode: int; CONTROL_AE_ANTIBANDING_MODE. Set if not None
915      face_detect_mode: int; STATISTICS_FACE_DETECT_MODE. Set if not None
916    Returns:
917      video_recorded_object: The recorded object returned from ItsService which
918      contains path at which the recording is saved on the device, quality of
919      the recorded video, video size of the recorded video, video frame rate
920      and 'hlg10' if 'hlg10_enabled' is set to True.
921      Ex:
922      VideoRecordingObject: {
923        'tag': 'recordingResponse',
924        'objValue': {
925          'recordedOutputPath':
926            '/storage/emulated/0/Android/data/com.android.cts.verifier'
927            '/files/VideoITS/VID_20220324_080414_0_CIF_352x288.mp4',
928          'quality': 'CIF',
929          'videoFrameRate': 30,
930          'videoSize': '352x288'
931        }
932      }
933    """
934    cmd = {_CMD_NAME_STR: 'doBasicRecording', _CAMERA_ID_STR: self._camera_id,
935           'profileId': profile_id, 'quality': quality,
936           'recordingDuration': duration,
937           'videoStabilizationMode': video_stabilization_mode,
938           'hlg10Enabled': hlg10_enabled}
939    if zoom_ratio:
940      if self.zoom_ratio_within_range(zoom_ratio):
941        cmd['zoomRatio'] = zoom_ratio
942      else:
943        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
944    if ae_target_fps_min and ae_target_fps_max:
945      cmd['aeTargetFpsMin'] = ae_target_fps_min
946      cmd['aeTargetFpsMax'] = ae_target_fps_max
947    if antibanding_mode:
948      cmd['aeAntibandingMode'] = antibanding_mode
949    else:
950      cmd['aeAntibandingMode'] = 0
951    if face_detect_mode:
952      cmd['faceDetectMode'] = face_detect_mode
953    else:
954      cmd['faceDetectMode'] = 0
955    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
956    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
957    self.sock.settimeout(timeout)
958    data, _ = self.__read_response_from_socket()
959    if data[_TAG_STR] != 'recordingResponse':
960      raise error_util.CameraItsError(
961          f'Invalid response for command: {cmd[_CMD_NAME_STR]}')
962    return data[_OBJ_VALUE_STR]
963
964  def _execute_preview_recording(self, cmd):
965    """Send preview recording command over socket and retrieve output object.
966
967    Args:
968      cmd: dict; Mapping from command key to corresponding value
969    Returns:
970      video_recorded_object: The recorded object returned from ItsService which
971      contains path at which the recording is saved on the device, quality of
972      the recorded video which is always set to "preview", video size of the
973      recorded video, video frame rate.
974      Ex:
975      VideoRecordingObject: {
976        'tag': 'recordingResponse',
977        'objValue': {
978          'recordedOutputPath': '/storage/emulated/0/Android/data/'
979                                'com.android.cts.verifier/files/VideoITS/'
980                                'VID_20220324_080414_0_CIF_352x288.mp4',
981          'quality': 'preview',
982          'videoSize': '352x288'
983        }
984      }
985    """
986    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
987    timeout = (self.SOCK_TIMEOUT_PREVIEW +
988               self.EXTRA_SOCK_TIMEOUT * _EXTRA_TIMEOUT_FACTOR)
989    self.sock.settimeout(timeout)
990
991    data, _ = self.__read_response_from_socket()
992    logging.debug('VideoRecordingObject: %s', str(data))
993    if data[_TAG_STR] != 'recordingResponse':
994      raise error_util.CameraItsError(
995          f'Invalid response from command{cmd[_CMD_NAME_STR]}')
996    return data[_OBJ_VALUE_STR]
997
998  def do_preview_recording_multiple_surfaces(
999      self, output_surfaces, video_stream_index, duration, stabilize_mode,
1000      ois=False, zoom_ratio=None, ae_target_fps_min=None, ae_target_fps_max=None,
1001      antibanding_mode=None, face_detect_mode=None):
1002    """Issue a preview request and read back the preview recording object.
1003
1004    The resolution of the preview and its recording will be determined by
1005    video_size. The duration is the time in seconds for which the preview will
1006    be recorded. The recorded object consists of a path on the device at
1007    which the recorded video is saved.
1008
1009    Args:
1010      output_surfaces: list; The list of output surfaces used for creating
1011                             preview recording session. The first surface
1012                             is used for recording.
1013      video_stream_index: int; The index of the output surface used for recording
1014      duration: int; The time in seconds for which the video will be recorded.
1015      stabilize_mode: int; The video stabilization mode
1016      ois: boolean; Whether the preview should be optically stabilized or not
1017      zoom_ratio: float; static zoom ratio. None if default zoom
1018      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
1019      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
1020      antibanding_mode: int; CONTROL_AE_ANTIBANDING_MODE. Set if not None
1021      face_detect_mode: int; STATISTICS_FACE_DETECT_MODE. Set if not None
1022    Returns:
1023      video_recorded_object: The recorded object returned from ItsService
1024    """
1025    cam_id = self._camera_id
1026    if 'physicalCamera' in output_surfaces[0]:
1027      cam_id = output_surfaces[0]['physicalCamera']
1028    cmd = {
1029        _CMD_NAME_STR: 'doStaticPreviewRecording',
1030        _CAMERA_ID_STR: cam_id,
1031        'outputSurfaces': output_surfaces,
1032        'recordSurfaceIndex': video_stream_index,
1033        'recordingDuration': duration,
1034        'stabilizeMode': stabilize_mode,
1035        'ois': ois,
1036    }
1037    if zoom_ratio:
1038      if self.zoom_ratio_within_range(zoom_ratio):
1039        cmd['zoomRatio'] = zoom_ratio
1040      else:
1041        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
1042    if ae_target_fps_min and ae_target_fps_max:
1043      cmd['aeTargetFpsMin'] = ae_target_fps_min
1044      cmd['aeTargetFpsMax'] = ae_target_fps_max
1045    if antibanding_mode is not None:
1046      cmd['aeAntibandingMode'] = antibanding_mode
1047    if face_detect_mode is not None:
1048      cmd['faceDetectMode'] = face_detect_mode
1049    return self._execute_preview_recording(cmd)
1050
1051  def do_preview_recording(
1052      self, video_size, duration, stabilize, ois=False, zoom_ratio=None,
1053      ae_target_fps_min=None, ae_target_fps_max=None, hlg10_enabled=False,
1054      antibanding_mode=None, face_detect_mode=None):
1055    """Issue a preview request and read back the preview recording object.
1056
1057    The resolution of the preview and its recording will be determined by
1058    video_size. The duration is the time in seconds for which the preview will
1059    be recorded. The recorded object consists of a path on the device at
1060    which the recorded video is saved.
1061
1062    Args:
1063      video_size: str; Preview resolution at which to record. ex. "1920x1080"
1064      duration: int; The time in seconds for which the video will be recorded.
1065      stabilize: boolean; Whether the preview should be stabilized or not
1066      ois: boolean; Whether the preview should be optically stabilized or not
1067      zoom_ratio: float; static zoom ratio. None if default zoom
1068      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
1069      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
1070      hlg10_enabled: boolean; True Eanable 10-bit HLG video recording, False
1071                              record using the regular SDK profile.
1072      antibanding_mode: int; CONTROL_AE_ANTIBANDING_MODE. Set if not None
1073      face_detect_mode: int; STATISTICS_FACE_DETECT_MODE. Set if not None
1074    Returns:
1075      video_recorded_object: The recorded object returned from ItsService
1076    """
1077    output_surfaces = self.preview_surface(video_size, hlg10_enabled)
1078    video_stream_index = 0
1079    if stabilize:
1080      stabilization_mode = camera_properties_utils.STABILIZATION_MODE_PREVIEW
1081    else:
1082      stabilization_mode = camera_properties_utils.STABILIZATION_MODE_OFF
1083    return self.do_preview_recording_multiple_surfaces(
1084        output_surfaces, video_stream_index, duration, stabilization_mode,
1085        ois, zoom_ratio, ae_target_fps_min, ae_target_fps_max, antibanding_mode,
1086        face_detect_mode)
1087
1088  def do_preview_recording_with_dynamic_zoom(self, video_size, stabilize,
1089                                             sweep_zoom,
1090                                             ae_target_fps_min=None,
1091                                             ae_target_fps_max=None,
1092                                             padded_frames=False):
1093    """Issue a preview request with dynamic zoom and read back output object.
1094
1095    The resolution of the preview and its recording will be determined by
1096    video_size. The duration will be determined by the duration at each zoom
1097    ratio and the total number of zoom ratios. The recorded object consists
1098    of a path on the device at which the recorded video is saved.
1099
1100    Args:
1101      video_size: str; Preview resolution at which to record. ex. "1920x1080"
1102      stabilize: boolean; Whether the preview should be stabilized or not
1103      sweep_zoom: tuple of (zoom_start, zoom_end, step_size, step_duration).
1104        Used to control zoom ratio during recording.
1105        zoom_start (float) is the starting zoom ratio during recording
1106        zoom_end (float) is the ending zoom ratio during recording
1107        step_size (float) is the step for zoom ratio during recording
1108        step_duration (float) sleep in ms between zoom ratios
1109      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
1110      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
1111      padded_frames: boolean; Whether to add additional frames at the beginning
1112        and end of recording to workaround issue with MediaRecorder.
1113    Returns:
1114      video_recorded_object: The recorded object returned from ItsService
1115    """
1116    output_surface = self.preview_surface(video_size)
1117    if stabilize:
1118      stabilization_mode = camera_properties_utils.STABILIZATION_MODE_PREVIEW
1119    else:
1120      stabilization_mode = camera_properties_utils.STABILIZATION_MODE_OFF
1121    cmd = {
1122        _CMD_NAME_STR: 'doDynamicZoomPreviewRecording',
1123        _CAMERA_ID_STR: self._camera_id,
1124        'outputSurfaces': output_surface,
1125        'stabilizeMode': stabilization_mode,
1126        'ois': False
1127    }
1128    zoom_start, zoom_end, step_size, step_duration = sweep_zoom
1129    if (not self.zoom_ratio_within_range(zoom_start) or
1130        not self.zoom_ratio_within_range(zoom_end)):
1131      raise AssertionError(
1132          f'Starting zoom ratio {zoom_start} or '
1133          f'ending zoom ratio {zoom_end} out of range'
1134      )
1135    if zoom_start > zoom_end or step_size < 0:
1136      raise NotImplementedError('Only increasing zoom ratios are supported')
1137    cmd['zoomStart'] = zoom_start
1138    cmd['zoomEnd'] = zoom_end
1139    cmd['stepSize'] = step_size
1140    cmd['stepDuration'] = step_duration
1141    cmd['hlg10Enabled'] = False
1142    cmd['paddedFrames'] = padded_frames
1143    if ae_target_fps_min and ae_target_fps_max:
1144      cmd['aeTargetFpsMin'] = ae_target_fps_min
1145      cmd['aeTargetFpsMax'] = ae_target_fps_max
1146    return self._execute_preview_recording(cmd)
1147
1148  def do_preview_recording_with_dynamic_ae_awb_region(
1149      self, video_size, ae_awb_regions, ae_awb_region_duration, stabilize=False,
1150      ae_target_fps_min=None, ae_target_fps_max=None):
1151    """Issue a preview request with dynamic 3A region and read back output object.
1152
1153    The resolution of the preview and its recording will be determined by
1154    video_size. The recorded object consists of a path on the device at which
1155    the recorded video is saved.
1156
1157    Args:
1158      video_size: str; Preview resolution at which to record. ex. "1920x1080"
1159      ae_awb_regions: dictionary of (regionBlue/Light/Dark/Yellow).
1160        Used to control 3A region during recording.
1161        regionBlue (metering rectangle) first ae/awb region of recording.
1162        regionLight (metering rectangle) second ae/awb region of recording.
1163        regionDark (metering rectangle) third ae/awb region of recording.
1164        regionYellow (metering rectangle) fourth ae/awb region of recording.
1165      ae_awb_region_duration: float; sleep in ms between 3A regions.
1166      stabilize: boolean; Whether the preview should be stabilized.
1167      ae_target_fps_min: int; If not none, set CONTROL_AE_TARGET_FPS_RANGE min.
1168      ae_target_fps_max: int; If not none, set CONTROL_AE_TARGET_FPS_RANGE max.
1169    Returns:
1170      video_recorded_object: The recorded object returned from ItsService.
1171    """
1172    output_surface = self.preview_surface(video_size)
1173    if stabilize:
1174      stabilization_mode = camera_properties_utils.STABILIZATION_MODE_PREVIEW
1175    else:
1176      stabilization_mode = camera_properties_utils.STABILIZATION_MODE_OFF
1177    cmd = {
1178        _CMD_NAME_STR: 'doDynamicMeteringRegionPreviewRecording',
1179        _CAMERA_ID_STR: self._camera_id,
1180        'outputSurfaces': output_surface,
1181        'stabilizeMode': stabilization_mode,
1182        'ois': False,
1183        'aeAwbRegionDuration': ae_awb_region_duration
1184    }
1185
1186    cmd['regionBlue'] = ae_awb_regions['regionBlue']
1187    cmd['regionLight'] = ae_awb_regions['regionLight']
1188    cmd['regionDark'] = ae_awb_regions['regionDark']
1189    cmd['regionYellow'] = ae_awb_regions['regionYellow']
1190    cmd['hlg10Enabled'] = False
1191    if ae_target_fps_min and ae_target_fps_max:
1192      cmd['aeTargetFpsMin'] = ae_target_fps_min
1193      cmd['aeTargetFpsMax'] = ae_target_fps_max
1194    return self._execute_preview_recording(cmd)
1195
1196  def get_supported_video_qualities(self, camera_id):
1197    """Get all supported video qualities for this camera device.
1198
1199    ie. ['480:4', '1080:6', '2160:8', '720:5', 'CIF:3', 'HIGH:1', 'LOW:0',
1200         'QCIF:2', 'QVGA:7']
1201
1202    Args:
1203      camera_id: device id
1204    Returns:
1205      List of all supported video qualities and corresponding profileIds.
1206    """
1207    cmd = {}
1208    cmd[_CMD_NAME_STR] = 'getSupportedVideoQualities'
1209    cmd[_CAMERA_ID_STR] = camera_id
1210    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1211    data, _ = self.__read_response_from_socket()
1212    if data[_TAG_STR] != 'supportedVideoQualities':
1213      raise error_util.CameraItsError('Invalid command response')
1214    return data[_STR_VALUE_STR].split(';')[:-1]  # remove the last appended ';'
1215
1216  def get_all_supported_preview_sizes(self, camera_id, filter_recordable=False):
1217    """Get all supported preview resolutions for this camera device.
1218
1219    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1220
1221    Note: resolutions are sorted by width x height in ascending order
1222
1223    Args:
1224      camera_id: int; device id
1225      filter_recordable: filter preview sizes if supported for video recording
1226                       using MediaRecorder
1227
1228    Returns:
1229      List of all supported preview resolutions in ascending order.
1230    """
1231    cmd = {
1232        _CMD_NAME_STR: 'getSupportedPreviewSizes',
1233        _CAMERA_ID_STR: camera_id,
1234        'filter_recordable': filter_recordable,
1235    }
1236    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1237    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1238    self.sock.settimeout(timeout)
1239    data, _ = self.__read_response_from_socket()
1240    if data[_TAG_STR] != 'supportedPreviewSizes':
1241      raise error_util.CameraItsError('Invalid command response')
1242    if not data[_STR_VALUE_STR]:
1243      raise error_util.CameraItsError('No supported preview sizes')
1244    supported_preview_sizes = data[_STR_VALUE_STR].split(';')
1245    logging.debug('Supported preview sizes: %s', supported_preview_sizes)
1246    return supported_preview_sizes
1247
1248  def get_supported_preview_sizes(self, camera_id):
1249    """Get supported preview resolutions for this camera device.
1250
1251    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1252
1253    Note: resolutions are sorted by width x height in ascending order
1254    Note: max resolution is capped at 1440x1920.
1255    Note: min resolution is capped at 320x240.
1256
1257    Args:
1258      camera_id: int; device id
1259
1260    Returns:
1261      List of all supported preview resolutions with floor & ceiling set
1262      by _CONSTANTS in ascending order.
1263    """
1264    supported_preview_sizes = self.get_all_supported_preview_sizes(camera_id)
1265    resolution_to_area = lambda s: int(s.split('x')[0])*int(s.split('x')[1])
1266    supported_preview_sizes = [size for size in supported_preview_sizes
1267                               if (resolution_to_area(size)
1268                                   <= PREVIEW_MAX_TESTED_AREA
1269                                   and resolution_to_area(size)
1270                                   >= PREVIEW_MIN_TESTED_AREA)]
1271    logging.debug(
1272        'Supported preview sizes (MIN: %d, MAX: %d area in pixels): %s',
1273        PREVIEW_MIN_TESTED_AREA, PREVIEW_MAX_TESTED_AREA,
1274        supported_preview_sizes
1275    )
1276    return supported_preview_sizes
1277
1278  def get_supported_extension_preview_sizes(self, camera_id, extension):
1279    """Get all supported preview resolutions for the extension mode.
1280
1281    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1282
1283    Note: resolutions are sorted by width x height in ascending order
1284
1285    Args:
1286      camera_id: int; device id
1287      extension: int; camera extension mode
1288
1289    Returns:
1290      List of all supported camera extension preview resolutions in
1291      ascending order.
1292    """
1293    cmd = {
1294        _CMD_NAME_STR: 'getSupportedExtensionPreviewSizes',
1295        _CAMERA_ID_STR: camera_id,
1296        "extension": extension  # pylint: disable=g-inconsistent-quotes
1297    }
1298    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1299    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1300    self.sock.settimeout(timeout)
1301    data, _ = self.__read_response_from_socket()
1302    if data[_TAG_STR] != 'supportedExtensionPreviewSizes':
1303      raise error_util.CameraItsError('Invalid command response')
1304    if not data[_STR_VALUE_STR]:
1305      raise error_util.CameraItsError('No supported extension preview sizes')
1306    supported_preview_sizes = data[_STR_VALUE_STR].split(';')
1307    logging.debug('Supported extension preview sizes: %s', supported_preview_sizes)
1308    return supported_preview_sizes
1309
1310  def get_queryable_stream_combinations(self):
1311    """Get all queryable stream combinations for this camera device.
1312
1313    This function parses the queryable stream combinations string
1314    returned from ItsService. The return value includes both the
1315    string and the parsed result.
1316
1317    One example of the queryable stream combination string is:
1318
1319    'priv:1920x1080+jpeg:4032x2268;priv:1280x720+priv:1280x720'
1320
1321    which can be parsed to:
1322
1323    [
1324      {
1325       "name": "priv:1920x1080+jpeg:4032x2268",
1326       "combination": [
1327                        {
1328                         "format": "priv",
1329                         "size": "1920x1080"
1330                        }
1331                        {
1332                         "format": "jpeg",
1333                         "size": "4032x2268"
1334                        }
1335                      ]
1336      }
1337      {
1338       "name": "priv:1280x720+priv:1280x720",
1339       "combination": [
1340                        {
1341                         "format": "priv",
1342                         "size": "1280x720"
1343                        },
1344                        {
1345                         "format": "priv",
1346                         "size": "1280x720"
1347                        }
1348                      ]
1349      }
1350    ]
1351
1352    Returns:
1353      Tuple of:
1354      - queryable stream combination string, and
1355      - parsed stream combinations
1356    """
1357    cmd = {
1358        _CMD_NAME_STR: 'getQueryableStreamCombinations',
1359    }
1360    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1361    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1362    self.sock.settimeout(timeout)
1363    data, _ = self.__read_response_from_socket()
1364    if data[_TAG_STR] != 'queryableStreamCombinations':
1365      raise error_util.CameraItsError('Invalid command response')
1366    if not data[_STR_VALUE_STR]:
1367      raise error_util.CameraItsError('No queryable stream combinations')
1368
1369    # Parse the stream combination string. Example:
1370    # '34+priv:1920x1080+jpeg:4032x2268;35+priv:1280x720+priv:1280x720'
1371    combinations = [{
1372        'name': c,
1373        'version': int(c.split('+')[0]),
1374        'combination': [
1375            {'format': s.split(':')[0],
1376             'size': s.split(':')[1]} for s in c.split('+')[1:]]}
1377                    for c in data[_STR_VALUE_STR].split(';')]
1378
1379    return data[_STR_VALUE_STR], combinations
1380
1381  def get_supported_extensions(self, camera_id):
1382    """Get all supported camera extensions for this camera device.
1383
1384    ie. [EXTENSION_AUTOMATIC, EXTENSION_BOKEH,
1385         EXTENSION_FACE_RETOUCH, EXTENSION_HDR, EXTENSION_NIGHT]
1386    where EXTENSION_AUTOMATIC is 0, EXTENSION_BOKEH is 1, etc.
1387
1388    Args:
1389      camera_id: int; device ID
1390    Returns:
1391      List of all supported extensions (as int) in ascending order.
1392    """
1393    cmd = {
1394        'cmdName': 'getSupportedExtensions',
1395        'cameraId': camera_id
1396    }
1397    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1398    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1399    self.sock.settimeout(timeout)
1400    data, _ = self.__read_response_from_socket()
1401    if data['tag'] != 'supportedExtensions':
1402      raise error_util.CameraItsError('Invalid command response')
1403    if not data['strValue']:
1404      raise error_util.CameraItsError('No supported extensions')
1405    return [int(x) for x in str(data['strValue'][1:-1]).split(', ') if x]
1406
1407  def get_supported_extension_sizes(self, camera_id, extension, image_format):
1408    """Get all supported camera sizes for this camera, extension, and format.
1409
1410    Sorts in ascending order according to area, i.e.
1411    ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1412
1413    Args:
1414      camera_id: int; device ID
1415      extension: int; the integer value of the extension.
1416      image_format: int; the integer value of the format.
1417    Returns:
1418      List of sizes supported for this camera, extension, and format.
1419    """
1420    cmd = {
1421        'cmdName': 'getSupportedExtensionSizes',
1422        'cameraId': camera_id,
1423        'extension': extension,
1424        'format': image_format
1425    }
1426    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1427    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1428    self.sock.settimeout(timeout)
1429    data, _ = self.__read_response_from_socket()
1430    if data[_TAG_STR] != 'supportedExtensionSizes':
1431      raise error_util.CameraItsError('Invalid command response')
1432    if not data[_STR_VALUE_STR]:
1433      logging.debug('No supported extension sizes')
1434      return ''
1435    return data[_STR_VALUE_STR].split(';')
1436
1437  def get_display_size(self):
1438    """Get the display size of the screen.
1439
1440    Returns:
1441      The size of the display resolution in pixels.
1442    """
1443    cmd = {
1444        'cmdName': 'getDisplaySize'
1445    }
1446    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1447    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1448    self.sock.settimeout(timeout)
1449    data, _ = self.__read_response_from_socket()
1450    if data['tag'] != 'displaySize':
1451      raise error_util.CameraItsError('Invalid command response')
1452    if not data['strValue']:
1453      raise error_util.CameraItsError('No display size')
1454    return data['strValue'].split('x')
1455
1456  def get_max_camcorder_profile_size(self, camera_id):
1457    """Get the maximum camcorder profile size for this camera device.
1458
1459    Args:
1460      camera_id: int; device id
1461    Returns:
1462      The maximum size among all camcorder profiles supported by this camera.
1463    """
1464    cmd = {
1465        'cmdName': 'getMaxCamcorderProfileSize',
1466        'cameraId': camera_id
1467    }
1468    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1469    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1470    self.sock.settimeout(timeout)
1471    data, _ = self.__read_response_from_socket()
1472    if data['tag'] != 'maxCamcorderProfileSize':
1473      raise error_util.CameraItsError('Invalid command response')
1474    if not data['strValue']:
1475      raise error_util.CameraItsError('No max camcorder profile size')
1476    return data['strValue'].split('x')
1477
1478  def do_simple_capture(self, cmd, out_surface):
1479    """Issue single capture request via command and read back image/metadata.
1480
1481    Args:
1482      cmd: Dictionary specifying command name, requests, and output surface.
1483      out_surface: Dictionary describing output surface.
1484    Returns:
1485      An object which contains following fields:
1486      * data: the image data as a numpy array of bytes.
1487      * width: the width of the captured image.
1488      * height: the height of the captured image.
1489      * format: image format
1490      * metadata: the capture result object
1491    """
1492    fmt = out_surface['format'] if 'format' in out_surface else 'yuv'
1493    if fmt == 'jpg': fmt = 'jpeg'
1494
1495    # we only have 1 capture request and 1 surface by definition.
1496    ncap = SINGLE_CAPTURE_NCAP
1497
1498    cam_id = None
1499    bufs = {}
1500    yuv_bufs = {}
1501    if self._hidden_physical_id:
1502      out_surface['physicalCamera'] = self._hidden_physical_id
1503
1504    if 'physicalCamera' in out_surface:
1505      cam_id = out_surface['physicalCamera']
1506    else:
1507      cam_id = self._camera_id
1508
1509    bufs[cam_id] = {
1510        'raw': [],
1511        'raw10': [],
1512        'raw12': [],
1513        'rawStats': [],
1514        'dng': [],
1515        'jpeg': [],
1516        'y8': [],
1517        'rawQuadBayer': [],
1518        'rawQuadBayerStats': [],
1519        'raw10Stats': [],
1520        'raw10QuadBayerStats': [],
1521        'raw10QuadBayer': [],
1522    }
1523
1524    # Only allow yuv output to multiple targets
1525    yuv_surface = None
1526    if cam_id == self._camera_id:
1527      if 'physicalCamera' not in out_surface:
1528        if out_surface['format'] == 'yuv':
1529          yuv_surface = out_surface
1530    else:
1531      if ('physicalCamera' in out_surface and
1532          out_surface['physicalCamera'] == cam_id):
1533        if out_surface['format'] == 'yuv':
1534          yuv_surface = out_surface
1535
1536    # Compute the buffer size of YUV targets
1537    yuv_maxsize_1d = 0
1538    if yuv_surface is not None:
1539      if ('width' not in yuv_surface and 'height' not in yuv_surface):
1540        if self.props is None:
1541          raise error_util.CameraItsError('Camera props are unavailable')
1542        yuv_maxsize_2d = capture_request_utils.get_available_output_sizes(
1543            'yuv', self.props)[0]
1544        # YUV420 size = 1.5 bytes per pixel
1545        yuv_maxsize_1d = (yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3) // 2
1546      if 'width' in yuv_surface and 'height' in yuv_surface:
1547        yuv_size = (yuv_surface['width'] * yuv_surface['height'] * 3) // 2
1548      else:
1549        yuv_size = yuv_maxsize_1d
1550
1551      yuv_bufs[cam_id] = {yuv_size: []}
1552
1553    cam_ids = self._camera_id
1554    self.sock.settimeout(self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT)
1555    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1556
1557    nbufs = 0
1558    md = None
1559    physical_md = None
1560    width = None
1561    height = None
1562    capture_results_returned = False
1563    while (nbufs < ncap) or (not capture_results_returned):
1564      json_obj, buf = self.__read_response_from_socket()
1565      if (json_obj[_TAG_STR] in ItsSession.IMAGE_FORMAT_LIST_1 and
1566          buf is not None):
1567        fmt = json_obj[_TAG_STR][:-5]
1568        bufs[self._camera_id][fmt].append(buf)
1569        nbufs += 1
1570      elif json_obj[_TAG_STR] == 'yuvImage':
1571        buf_size = get_array_size(buf)
1572        yuv_bufs[self._camera_id][buf_size].append(buf)
1573        nbufs += 1
1574      elif json_obj[_TAG_STR] == 'captureResults':
1575        capture_results_returned = True
1576        md = json_obj[_OBJ_VALUE_STR]['captureResult']
1577        physical_md = json_obj[_OBJ_VALUE_STR]['physicalResults']
1578        outputs = json_obj[_OBJ_VALUE_STR]['outputs']
1579        returned_fmt = outputs[0]['format']
1580        if fmt != returned_fmt:
1581          raise AssertionError(
1582              f'Incorrect format. Requested: {fmt}, '
1583              f'Received: {returned_fmt}')
1584        width = outputs[0]['width']
1585        height = outputs[0]['height']
1586        requested_width = out_surface['width']
1587        requested_height = out_surface['height']
1588        if requested_width != width or requested_height != height:
1589          raise AssertionError(
1590              'Incorrect size. '
1591              f'Requested: {requested_width}x{requested_height}, '
1592              f'Received: {width}x{height}')
1593      else:
1594        tag_string = unicodedata.normalize('NFKD', json_obj[_TAG_STR]).encode(
1595            'ascii', 'ignore')
1596        for x in ItsSession.IMAGE_FORMAT_LIST_2:
1597          x = bytes(x, encoding='utf-8')
1598          if tag_string.startswith(x):
1599            if x == b'yuvImage':
1600              physical_id = json_obj[_TAG_STR][len(x):]
1601              if physical_id in cam_ids:
1602                buf_size = get_array_size(buf)
1603                yuv_bufs[physical_id][buf_size].append(buf)
1604                nbufs += 1
1605            else:
1606              physical_id = json_obj[_TAG_STR][len(x):]
1607              if physical_id in cam_ids:
1608                fmt = x[:-5].decode('UTF-8')
1609                bufs[physical_id][fmt].append(buf)
1610                nbufs += 1
1611
1612    if 'physicalCamera' in out_surface:
1613      cam_id = out_surface['physicalCamera']
1614    else:
1615      cam_id = self._camera_id
1616    ret = {'width': width, 'height': height, 'format': fmt}
1617    if cam_id == self._camera_id:
1618      ret['metadata'] = md
1619    else:
1620      if cam_id in physical_md:
1621        ret['metadata'] = physical_md[cam_id]
1622
1623    if fmt == 'yuv':
1624      buf_size = (width * height * 3) // 2
1625      ret['data'] = yuv_bufs[cam_id][buf_size][0]
1626    else:
1627      ret['data'] = bufs[cam_id][fmt][0]
1628
1629    return ret
1630
1631  def do_jca_capture(
1632      self, dut, log_path, flash_mode_desc, lens_facing, zoom_ratio=1.0,
1633      save_image_delay=None):
1634    """Take a single capture using JCA, modifying capture settings using the UI.
1635
1636    This function is a convenience wrapper for tests that only need to take
1637    a single capture.
1638
1639    Args:
1640      dut: An Android controller device object.
1641      log_path: str; log path to save screenshots.
1642      flash_mode_desc: str; constant describing the desired flash mode.
1643        Acceptable values: ui_interaction_utils.FLASH_MODES
1644      lens_facing: str; constant describing the direction the camera lens faces.
1645        Acceptable values: camera_properties_utils.LENS_FACING[BACK, FRONT]
1646      zoom_ratio: float; zoom ratio for the capture.
1647      save_image_delay: Optional[float]; time to wait after pressing the
1648        capture button before ending the JCA capture activity.
1649    Returns:
1650      A ui_interaction_utils.JcaCapture object describing the capture.
1651    """
1652    captures = list(
1653        self.do_jca_captures_across_zoom_ratios(
1654            dut, log_path, flash_mode_desc, lens_facing,
1655            zoom_ratios=(zoom_ratio,), save_image_delay=save_image_delay
1656        )
1657    )
1658    if len(captures) != 1:
1659      raise AssertionError(f'Expected 1 capture, got {len(captures)}!')
1660    return captures[0]
1661
  def do_jca_captures_across_zoom_ratios(
      self, dut, log_path, flash_mode_desc, lens_facing, zoom_ratios=(1.0,),
      save_image_delay=None):
    """Take multiple captures using JCA, modifying capture settings using UI.

    Selects UI elements to modify settings, and presses the capture button
    once per requested zoom ratio. Reads responses from the socket containing
    the capture paths, and pulls the images from the DUT.

    This method is included here because an ITS session is needed to retrieve
    the capture path from the device.

    Args:
      dut: An Android controller device object.
      log_path: str; log path to save screenshots.
      flash_mode_desc: str; constant describing the desired flash mode.
        Acceptable values: ui_interaction_utils.FLASH_MODES
      lens_facing: str; constant describing the direction the camera lens faces.
        Acceptable values: camera_properties_utils.LENS_FACING[BACK, FRONT]
      zoom_ratios: Optional[Iterable[float]]; zoom ratios, one capture is
        taken per ratio.
      save_image_delay: Optional[float]; time to wait after pressing the
        capture button before ending the JCA capture activity.
    Yields:
      A ui_interaction_utils.JcaCapture object describing each capture.
    Raises:
      AssertionError: If the number of pulled captures does not match the
        number of requested zoom ratios.
    """
    physical_camera_ids = []
    # Viewfinder, lens direction and flash mode are configured once; only the
    # zoom ratio changes between captures.
    ui_interaction_utils.open_jca_viewfinder(dut, log_path)
    ui_interaction_utils.switch_jca_camera(dut, log_path, lens_facing)
    ui_interaction_utils.set_jca_flash_mode(dut, log_path, flash_mode_desc)
    for zoom_ratio in zoom_ratios:
      ui_interaction_utils.jca_ui_zoom(dut, zoom_ratio, log_path)
      # Read the active physical camera ID from the JCA UI element; a
      # non-numeric value means no ID is available for this capture.
      try:
        physical_camera_id = int(
            dut.ui(
                res=ui_interaction_utils.UI_PHYSICAL_CAMERA_RESOURCE_ID).text
        )
        logging.debug('Physical camera ID: %d', physical_camera_id)
      except ValueError:
        physical_camera_id = None
      physical_camera_ids.append(physical_camera_id)
      # Take capture, then wait for the success message to appear and
      # disappear before moving to the next zoom ratio.
      dut.ui(res=ui_interaction_utils.CAPTURE_BUTTON_RESOURCE_ID).click()
      dut.ui(
          text=ui_interaction_utils.UI_IMAGE_CAPTURE_SUCCESS_TEXT).wait.exists(
              ui_interaction_utils.UI_OBJECT_WAIT_TIME_SECONDS)
      dut.ui(text=ui_interaction_utils.UI_IMAGE_CAPTURE_SUCCESS_TEXT).wait.gone(
          ui_interaction_utils.UI_OBJECT_WAIT_TIME_SECONDS)
    # TODO: b/404350495 - Handle the case where the image is still not saved.
    if save_image_delay:
      time.sleep(save_image_delay)
    dut.ui.press.back()
    number_of_captures = 0
    # NOTE(review): assumes capture paths are returned in request order so
    # each pulled file pairs with the physical ID recorded for that capture.
    for capture_path, physical_camera_id in zip(
        self.get_and_pull_jca_capture(dut, log_path), physical_camera_ids):
      number_of_captures += 1
      yield ui_interaction_utils.JcaCapture(capture_path, physical_camera_id)
    if number_of_captures != len(zoom_ratios):
      raise AssertionError(
          f'Expected {len(zoom_ratios)} captures, got {number_of_captures}!'
      )
1723
1724  def do_jca_video_capture(self, dut, log_path, duration):
1725    """Take a capture using JCA using the UI.
1726
1727    Captures JCA video by holding the capture button with requested duration.
1728    Reads response from socket containing the capture path, and
1729    pulls the image from the DUT.
1730
1731    This method is included here because an ITS session is needed to retrieve
1732    the capture path from the device.
1733
1734    Args:
1735      dut: An Android controller device object.
1736      log_path: str; log path to save screenshots.
1737      duration: int; requested video duration, in ms.
1738    Returns:
1739      The host-side path of the capture.
1740    """
1741    # Make sure JCA is started
1742    jca_capture_button_visible = dut.ui(
1743        res=ui_interaction_utils.CAPTURE_BUTTON_RESOURCE_ID).wait.exists(
1744            ui_interaction_utils.UI_OBJECT_WAIT_TIME_SECONDS)
1745    if not jca_capture_button_visible:
1746      raise AssertionError('JCA was not started! Please use'
1747                           'open_jca_viewfinder() or do_jca_video_setup()'
1748                           'in ui_interaction_utils.py to start JCA.')
1749    dut.ui(res=ui_interaction_utils.CAPTURE_BUTTON_RESOURCE_ID).click(duration)
1750    return self.get_and_pull_jca_video_capture(dut, log_path)
1751
1752  def _get_jca_capture_paths(self):
1753    """Handle JCA capture result paths from the socket.
1754
1755    Returns:
1756      A capture result path or a list of capture results paths.
1757    """
1758    capture_paths, capture_status = None, None
1759    while not capture_paths or not capture_status:
1760      data, _ = self.__read_response_from_socket()
1761      if data[_TAG_STR] == JCA_CAPTURE_STATUS_TAG:
1762        capture_status = data[_STR_VALUE_STR]
1763      elif data[_TAG_STR] == JCA_CAPTURE_PATHS_TAG:
1764        if capture_paths is not None:
1765          raise error_util.CameraItsError(
1766              f'Invalid response {data[_TAG_STR]} for JCA capture')
1767        capture_paths = data[_OBJ_VALUE_STR][JCA_CAPTURE_PATHS_TAG]
1768      elif data[_TAG_STR] == JCA_VIDEO_PATH_TAG:
1769        if capture_paths is not None:
1770          raise error_util.CameraItsError(
1771              f'Invalid response {data[_TAG_STR]} for JCA capture')
1772        capture_paths = data[_STR_VALUE_STR]
1773      else:
1774        raise error_util.CameraItsError(
1775            f'Invalid response {data[_TAG_STR]} for JCA capture')
1776    if capture_status != RESULT_OK_STATUS:
1777      logging.error('Capture failed! Expected status %d, received %d',
1778                    RESULT_OK_STATUS, capture_status)
1779    logging.debug('capture paths: %s', capture_paths)
1780    return capture_paths
1781
1782  def get_and_pull_jca_capture(self, dut, log_path):
1783    """Retrieve a capture path from the socket and pulls capture to host.
1784
1785    Args:
1786      dut: An Android controller device object.
1787      log_path: str; log path to save screenshots.
1788    Yields:
1789      The host-side path of a capture.
1790    Raises:
1791      CameraItsError: If unexpected data is retrieved from the socket.
1792    """
1793    capture_paths = self._get_jca_capture_paths()
1794    for capture_path in capture_paths:
1795      _, capture_name = os.path.split(capture_path)
1796      its_device_utils.run(
1797          f'adb -s {dut.serial} pull {capture_path} {log_path}')
1798      yield os.path.join(log_path, capture_name)
1799
1800  def get_and_pull_jca_video_capture(self, dut, log_path):
1801    """Retrieve a capture path from the socket and pulls capture to host.
1802
1803    Args:
1804      dut: An Android controller device object.
1805      log_path: str; log path to save screenshots.
1806    Returns:
1807      The host-side path of the capture.
1808    Raises:
1809      CameraItsError: If unexpected data is retrieved from the socket.
1810    """
1811    capture_path = self._get_jca_capture_paths()
1812    _, capture_name = os.path.split(capture_path)
1813    its_device_utils.run(f'adb -s {dut.serial} pull {capture_path} {log_path}')
1814    return os.path.join(log_path, capture_name)
1815
1816  def do_capture_with_flash(self,
1817                            preview_request_start,
1818                            preview_request_idle,
1819                            still_capture_req,
1820                            out_surface):
1821    """Issue capture request with flash and read back the image and metadata.
1822
1823    Captures a single image with still_capture_req as capture request
1824    with flash. It triggers the precapture sequence with preview request
1825    preview_request_start with capture intent preview by setting aePrecapture
1826    trigger to Start. This is followed by repeated preview requests
1827    preview_request_idle with aePrecaptureTrigger set to IDLE.
1828    Once the AE is converged, a single image is captured still_capture_req
1829    during which the flash must be fired.
1830    Note: The part where we read output data from socket is cloned from
1831    do_capture and will be consolidated in U.
1832
1833    Args:
1834      preview_request_start: Preview request with aePrecaptureTrigger set to
1835        Start
1836      preview_request_idle: Preview request with aePrecaptureTrigger set to Idle
1837      still_capture_req: Single still capture request.
1838      out_surface: Specifications of the output image formats and
1839        sizes to use for capture. Supports yuv and jpeg.
1840    Returns:
1841      An object which contains following fields:
1842      * data: the image data as a numpy array of bytes.
1843      * width: the width of the captured image.
1844      * height: the height of the captured image.
1845      * format: image format
1846      * metadata: the capture result object
1847    """
1848    cmd = {}
1849    cmd[_CMD_NAME_STR] = 'doCaptureWithFlash'
1850    cmd['previewRequestStart'] = [preview_request_start]
1851    cmd['previewRequestIdle'] = [preview_request_idle]
1852    cmd['stillCaptureRequest'] = [still_capture_req]
1853    cmd['outputSurfaces'] = [out_surface]
1854    if 'android.control.aeMode' in still_capture_req:
1855      logging.debug('Capturing image with aeMode: %d',
1856                    still_capture_req['android.control.aeMode'])
1857    return self.do_simple_capture(cmd, out_surface)
1858
1859  def do_capture_with_extensions(self,
1860                                 cap_request,
1861                                 extension,
1862                                 out_surface):
1863    """Issue extension capture request(s), and read back image(s) and metadata.
1864
1865    Args:
1866      cap_request: The Python dict/list specifying the capture(s), which will be
1867        converted to JSON and sent to the device.
1868      extension: The extension to be requested.
1869      out_surface: specifications of the output image format and
1870        size to use for the capture.
1871
1872    Returns:
1873      An object, list of objects, or list of lists of objects, where each
1874      object contains the following fields:
1875      * data: the image data as a numpy array of bytes.
1876      * width: the width of the captured image.
1877      * height: the height of the captured image.
1878      * format: image the format, in [
1879                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
1880      * metadata: the capture result object (Python dictionary).
1881    """
1882    cmd = {}
1883    cmd[_CMD_NAME_STR] = 'doCaptureWithExtensions'
1884    cmd['repeatRequests'] = []
1885    cmd['captureRequests'] = [cap_request]
1886    cmd['extension'] = extension
1887    cmd['outputSurfaces'] = [out_surface]
1888
1889    logging.debug('Capturing image with EXTENSIONS.')
1890    return self.do_simple_capture(cmd, out_surface)
1891
1892  def do_capture(self,
1893                 cap_request,
1894                 out_surfaces=None,
1895                 reprocess_format=None,
1896                 repeat_request=None,
1897                 reuse_session=False,
1898                 first_surface_for_3a=False):
1899    """Issue capture request(s), and read back the image(s) and metadata.
1900
1901    The main top-level function for capturing one or more images using the
1902    device. Captures a single image if cap_request is a single object, and
1903    captures a burst if it is a list of objects.
1904
1905    The optional repeat_request field can be used to assign a repeating
1906    request list ran in background for 3 seconds to warm up the capturing
1907    pipeline before start capturing. The repeat_requests will be ran on a
1908    640x480 YUV surface without sending any data back. The caller needs to
1909    make sure the stream configuration defined by out_surfaces and
1910    repeat_request are valid or do_capture may fail because device does not
1911    support such stream configuration.
1912
1913    The out_surfaces field can specify the width(s), height(s), and
1914    format(s) of the captured image. The formats may be "yuv", "jpeg",
1915    "dng", "raw", "raw10", "raw12", "rawStats" or "y8". The default is a
1916    YUV420 frame ("yuv") corresponding to a full sensor frame.
1917
1918    1. Optionally the out_surfaces field can specify physical camera id(s) if
1919    the current camera device is a logical multi-camera. The physical camera
1920    id must refer to a physical camera backing this logical camera device.
1921    2. Optionally The output_surfaces field can also specify the use case(s) if
1922    the current camera device has STREAM_USE_CASE capability.
1923
1924    Note that one or more surfaces can be specified, allowing a capture to
1925    request images back in multiple formats (e.g.) raw+yuv, raw+jpeg,
1926    yuv+jpeg, raw+yuv+jpeg. If the size is omitted for a surface, the
1927    default is the largest resolution available for the format of that
1928    surface. At most one output surface can be specified for a given format,
1929    and raw+dng, raw10+dng, and raw+raw10 are not supported as combinations.
1930
1931    If reprocess_format is not None, for each request, an intermediate
1932    buffer of the given reprocess_format will be captured from camera and
1933    the intermediate buffer will be reprocessed to the output surfaces. The
1934    following settings will be turned off when capturing the intermediate
1935    buffer and will be applied when reprocessing the intermediate buffer.
1936    1. android.noiseReduction.mode
1937    2. android.edge.mode
1938    3. android.reprocess.effectiveExposureFactor
1939
1940    Supported reprocess format are "yuv" and "private". Supported output
1941    surface formats when reprocessing is enabled are "yuv" and "jpeg".
1942
1943    Example of a single capture request:
1944
1945    {
1946     "android.sensor.exposureTime": 100*1000*1000,
1947     "android.sensor.sensitivity": 100
1948    }
1949
1950    Example of a list of capture requests:
1951    [
1952     {
1953       "android.sensor.exposureTime": 100*1000*1000,
1954       "android.sensor.sensitivity": 100
1955     },
1956    {
1957      "android.sensor.exposureTime": 100*1000*1000,
1958       "android.sensor.sensitivity": 200
1959     }
1960    ]
1961
1962    Example of output surface specifications:
1963    {
1964     "width": 640,
1965     "height": 480,
1966     "format": "yuv"
1967    }
1968    [
1969     {
1970       "format": "jpeg"
1971     },
1972     {
1973       "format": "raw"
1974     }
1975    ]
1976
1977    The following variables defined in this class are shortcuts for
1978    specifying one or more formats where each output is the full size for
1979    that format; they can be used as values for the out_surfaces arguments:
1980
1981    CAP_RAW
1982    CAP_DNG
1983    CAP_YUV
1984    CAP_JPEG
1985    CAP_RAW_YUV
1986    CAP_DNG_YUV
1987    CAP_RAW_JPEG
1988    CAP_DNG_JPEG
1989    CAP_YUV_JPEG
1990    CAP_RAW_YUV_JPEG
1991    CAP_DNG_YUV_JPEG
1992
1993    If multiple formats are specified, then this function returns multiple
1994    capture objects, one for each requested format. If multiple formats and
1995    multiple captures (i.e. a burst) are specified, then this function
1996    returns multiple lists of capture objects. In both cases, the order of
1997    the returned objects matches the order of the requested formats in the
1998    out_surfaces parameter. For example:
1999
2000    yuv_cap = do_capture(req1)
2001    yuv_cap = do_capture(req1,yuv_fmt)
2002    yuv_cap, raw_cap = do_capture(req1, [yuv_fmt,raw_fmt])
2003    yuv_caps = do_capture([req1,req2], yuv_fmt)
2004    yuv_caps, raw_caps = do_capture([req1,req2], [yuv_fmt,raw_fmt])
2005
2006    The "rawStats" format processes the raw image and returns a new image
2007    of statistics from the raw image. The format takes additional keys,
2008    "gridWidth" and "gridHeight" which are size of grid cells in a 2D grid
2009    of the raw image. For each grid cell, the mean and variance of each raw
2010    channel is computed, and the do_capture call returns two 4-element float
2011    images of dimensions (rawWidth / gridWidth, rawHeight / gridHeight),
2012    concatenated back-to-back, where the first image contains the 4-channel
2013    means and the second contains the 4-channel variances. Note that only
2014    pixels in the active array crop region are used; pixels outside this
2015    region (for example optical black rows) are cropped out before the
2016    gridding and statistics computation is performed.
2017
2018    For the rawStats format, if the gridWidth is not provided then the raw
2019    image width is used as the default, and similarly for gridHeight. With
2020    this, the following is an example of a output description that computes
2021    the mean and variance across each image row:
2022    {
2023      "gridHeight": 1,
2024      "format": "rawStats"
2025    }
2026
2027    Args:
2028      cap_request: The Python dict/list specifying the capture(s), which will be
2029        converted to JSON and sent to the device.
2030      out_surfaces: (Optional) specifications of the output image formats and
2031        sizes to use for each capture.
2032      reprocess_format: (Optional) The reprocessing format. If not
2033        None,reprocessing will be enabled.
2034      repeat_request: Repeating request list.
2035      reuse_session: True if ItsService.java should try to use
2036        the existing CameraCaptureSession.
2037      first_surface_for_3a: Use first surface in out_surfaces for 3A, not capture
2038        Only applicable if out_surfaces contains at least 1 surface.
2039
2040    Returns:
2041      An object, list of objects, or list of lists of objects, where each
2042      object contains the following fields:
2043      * data: the image data as a numpy array of bytes.
2044      * width: the width of the captured image.
2045      * height: the height of the captured image.
2046      * format: image the format, in [
2047                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
2048      * metadata: the capture result object (Python dictionary).
2049    """
2050    cmd = {}
2051    if reprocess_format is not None:
2052      if repeat_request is not None:
2053        raise error_util.CameraItsError(
2054            'repeating request + reprocessing is not supported')
2055      cmd[_CMD_NAME_STR] = 'doReprocessCapture'
2056      cmd['reprocessFormat'] = reprocess_format
2057    else:
2058      cmd[_CMD_NAME_STR] = 'doCapture'
2059
2060    if repeat_request is None:
2061      cmd['repeatRequests'] = []
2062    elif not isinstance(repeat_request, list):
2063      cmd['repeatRequests'] = [repeat_request]
2064    else:
2065      cmd['repeatRequests'] = repeat_request
2066
2067    if not isinstance(cap_request, list):
2068      cmd['captureRequests'] = [cap_request]
2069    else:
2070      cmd['captureRequests'] = cap_request
2071
2072    if out_surfaces:
2073      if isinstance(out_surfaces, list):
2074        cmd['outputSurfaces'] = out_surfaces
2075      else:
2076        cmd['outputSurfaces'] = [out_surfaces]
2077      formats = [
2078          c['format'] if 'format' in c else 'yuv' for c in cmd['outputSurfaces']
2079      ]
2080      formats = [s if s != 'jpg' else 'jpeg' for s in formats]
2081    else:
2082      max_yuv_size = capture_request_utils.get_available_output_sizes(
2083          'yuv', self.props)[0]
2084      formats = ['yuv']
2085      cmd['outputSurfaces'] = [{
2086          'format': 'yuv',
2087          'width': max_yuv_size[0],
2088          'height': max_yuv_size[1]
2089      }]
2090
2091    cmd['reuseSession'] = reuse_session
2092    cmd['firstSurfaceFor3A'] = first_surface_for_3a
2093
2094    requested_surfaces = cmd['outputSurfaces'][:]
2095    if first_surface_for_3a:
2096      formats.pop(0)
2097      requested_surfaces.pop(0)
2098
2099    ncap = len(cmd['captureRequests'])
2100    nsurf = len(formats)
2101
2102    cam_ids = []
2103    bufs = {}
2104    yuv_bufs = {}
2105    for i, s in enumerate(cmd['outputSurfaces']):
2106      if self._hidden_physical_id:
2107        s['physicalCamera'] = self._hidden_physical_id
2108
2109      if 'physicalCamera' in s:
2110        cam_id = s['physicalCamera']
2111      else:
2112        cam_id = self._camera_id
2113
2114      if cam_id not in cam_ids:
2115        cam_ids.append(cam_id)
2116        bufs[cam_id] = {
2117            'raw': [],
2118            'raw10': [],
2119            'raw12': [],
2120            'rawStats': [],
2121            'dng': [],
2122            'jpeg': [],
2123            'jpeg_r': [],
2124            'heic_ultrahdr': [],
2125            'y8': [],
2126            'rawQuadBayer': [],
2127            'rawQuadBayerStats': [],
2128            'raw10Stats': [],
2129            'raw10QuadBayerStats': [],
2130            'raw10QuadBayer': [],
2131        }
2132
2133    for cam_id in cam_ids:
2134       # Only allow yuv output to multiple targets
2135      if cam_id == self._camera_id:
2136        yuv_surfaces = [
2137            s for s in requested_surfaces
2138            if s['format'] == 'yuv' and 'physicalCamera' not in s
2139        ]
2140        formats_for_id = [
2141            s['format']
2142            for s in requested_surfaces
2143            if 'physicalCamera' not in s
2144        ]
2145      else:
2146        yuv_surfaces = [
2147            s for s in requested_surfaces if s['format'] == 'yuv' and
2148            'physicalCamera' in s and s['physicalCamera'] == cam_id
2149        ]
2150        formats_for_id = [
2151            s['format']
2152            for s in requested_surfaces
2153            if 'physicalCamera' in s and s['physicalCamera'] == cam_id
2154        ]
2155
2156      n_yuv = len(yuv_surfaces)
2157      # Compute the buffer size of YUV targets
2158      yuv_maxsize_1d = 0
2159      for s in yuv_surfaces:
2160        if ('width' not in s and 'height' not in s):
2161          if self.props is None:
2162            raise error_util.CameraItsError('Camera props are unavailable')
2163          yuv_maxsize_2d = capture_request_utils.get_available_output_sizes(
2164              'yuv', self.props)[0]
2165          # YUV420 size = 1.5 bytes per pixel
2166          yuv_maxsize_1d = (yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3) // 2
2167          break
2168      yuv_sizes = [
2169          (c['width'] * c['height'] * 3) // 2
2170          if 'width' in c and 'height' in c else yuv_maxsize_1d
2171          for c in yuv_surfaces
2172      ]
2173      # Currently we don't pass enough metadata from ItsService to distinguish
2174      # different yuv stream of same buffer size
2175      if len(yuv_sizes) != len(set(yuv_sizes)):
2176        raise error_util.CameraItsError(
2177            'ITS does not support yuv outputs of same buffer size')
2178      if len(formats_for_id) > len(set(formats_for_id)):
2179        if n_yuv != len(formats_for_id) - len(set(formats_for_id)) + 1:
2180          raise error_util.CameraItsError('Duplicate format requested')
2181
2182      yuv_bufs[cam_id] = {size: [] for size in yuv_sizes}
2183
2184    logging.debug('yuv bufs: %s', yuv_bufs)
2185    raw_formats = 0
2186    raw_formats += 1 if 'dng' in formats else 0
2187    raw_formats += 1 if 'raw' in formats else 0
2188    raw_formats += 1 if 'raw10' in formats else 0
2189    raw_formats += 1 if 'raw12' in formats else 0
2190    raw_formats += 1 if 'rawStats' in formats else 0
2191    raw_formats += 1 if 'rawQuadBayer' in formats else 0
2192    raw_formats += 1 if 'rawQuadBayerStats' in formats else 0
2193    raw_formats += 1 if 'raw10Stats' in formats else 0
2194    raw_formats += 1 if 'raw10QuadBayer' in formats else 0
2195    raw_formats += 1 if 'raw10QuadBayerStats' in formats else 0
2196
2197    if raw_formats > 1:
2198      raise error_util.CameraItsError('Different raw formats not supported')
2199
2200    # Detect long exposure time and set timeout accordingly
2201    longest_exp_time = 0
2202    for req in cmd['captureRequests']:
2203      if 'android.sensor.exposureTime' in req and req[
2204          'android.sensor.exposureTime'] > longest_exp_time:
2205        longest_exp_time = req['android.sensor.exposureTime']
2206
2207    extended_timeout = longest_exp_time // self.SEC_TO_NSEC + self.SOCK_TIMEOUT
2208    if repeat_request:
2209      extended_timeout += self.EXTRA_SOCK_TIMEOUT
2210    self.sock.settimeout(extended_timeout)
2211
2212    logging.debug('Capturing %d frame%s with %d format%s [%s]', ncap,
2213                  's' if ncap > 1 else '', nsurf, 's' if nsurf > 1 else '',
2214                  ','.join(formats))
2215    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2216
2217    # Wait for ncap*nsurf images and ncap metadata responses.
2218    # Assume that captures come out in the same order as requested in
2219    # the burst, however individual images of different formats can come
2220    # out in any order for that capture.
2221    nbufs = 0
2222    mds = []
2223    physical_mds = []
2224    widths = None
2225    heights = None
2226    camera_id = (
2227        self._camera_id
2228        if not self._hidden_physical_id
2229        else self._hidden_physical_id
2230    )
2231    logging.debug('Using camera_id %s to store buffers', camera_id)
2232    while nbufs < ncap * nsurf or len(mds) < ncap:
2233      json_obj, buf = self.__read_response_from_socket()
2234      if (json_obj[_TAG_STR] in ItsSession.IMAGE_FORMAT_LIST_1 and
2235          buf is not None):
2236        fmt = json_obj[_TAG_STR][:-5]
2237        bufs[camera_id][fmt].append(buf)
2238        nbufs += 1
2239      # Physical camera is appended to the tag string of a private capture
2240      elif json_obj[_TAG_STR].startswith('privImage'):
2241        # The private image format buffers are opaque to camera clients
2242        # and cannot be accessed.
2243        nbufs += 1
2244      elif json_obj[_TAG_STR] == 'yuvImage':
2245        buf_size = get_array_size(buf)
2246        yuv_bufs[camera_id][buf_size].append(buf)
2247        nbufs += 1
2248      elif json_obj[_TAG_STR] == 'captureResults':
2249        mds.append(json_obj[_OBJ_VALUE_STR]['captureResult'])
2250        physical_mds.append(json_obj[_OBJ_VALUE_STR]['physicalResults'])
2251        outputs = json_obj[_OBJ_VALUE_STR]['outputs']
2252        widths = [out['width'] for out in outputs]
2253        heights = [out['height'] for out in outputs]
2254      else:
2255        tag_string = unicodedata.normalize('NFKD', json_obj[_TAG_STR]).encode(
2256            'ascii', 'ignore')
2257        for x in ItsSession.IMAGE_FORMAT_LIST_2:
2258          x = bytes(x, encoding='utf-8')
2259          if tag_string.startswith(x):
2260            if x == b'yuvImage':
2261              physical_id = json_obj[_TAG_STR][len(x):]
2262              if physical_id in cam_ids:
2263                buf_size = get_array_size(buf)
2264                yuv_bufs[physical_id][buf_size].append(buf)
2265                nbufs += 1
2266            else:
2267              physical_id = json_obj[_TAG_STR][len(x):]
2268              if physical_id in cam_ids:
2269                fmt = x[:-5].decode('UTF-8')
2270                bufs[physical_id][fmt].append(buf)
2271                nbufs += 1
2272    rets = []
2273    for j, fmt in enumerate(formats):
2274      objs = []
2275      if 'physicalCamera' in requested_surfaces[j]:
2276        cam_id = requested_surfaces[j]['physicalCamera']
2277      else:
2278        cam_id = self._camera_id
2279
2280      for i in range(ncap):
2281        obj = {}
2282        obj['width'] = widths[j]
2283        obj['height'] = heights[j]
2284        obj['format'] = fmt
2285        if cam_id == self._camera_id:
2286          obj['metadata'] = mds[i]
2287        else:
2288          for physical_md in physical_mds[i]:
2289            if cam_id in physical_md:
2290              obj['metadata'] = physical_md[cam_id]
2291              break
2292
2293        if fmt == 'yuv':
2294          buf_size = (widths[j] * heights[j] * 3) // 2
2295          obj['data'] = yuv_bufs[cam_id][buf_size][i]
2296        elif fmt != 'priv':
2297          obj['data'] = bufs[cam_id][fmt][i]
2298        objs.append(obj)
2299      rets.append(objs if ncap > 1 else objs[0])
2300    self.sock.settimeout(self.SOCK_TIMEOUT)
2301    if len(rets) > 1 or (isinstance(rets[0], dict) and
2302                         isinstance(cap_request, list)):
2303      return rets
2304    else:
2305      return rets[0]
2306
2307  def do_vibrate(self, pattern):
2308    """Cause the device to vibrate to a specific pattern.
2309
2310    Args:
2311      pattern: Durations (ms) for which to turn on or off the vibrator.
2312      The first value indicates the number of milliseconds to wait
2313      before turning the vibrator on. The next value indicates the
2314      number of milliseconds for which to keep the vibrator on
2315      before turning it off. Subsequent values alternate between
2316      durations in milliseconds to turn the vibrator off or to turn
2317      the vibrator on.
2318
2319    Returns:
2320      Nothing.
2321    """
2322    cmd = {}
2323    cmd[_CMD_NAME_STR] = 'doVibrate'
2324    cmd['pattern'] = pattern
2325    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2326    data, _ = self.__read_response_from_socket()
2327    if data[_TAG_STR] != 'vibrationStarted':
2328      raise error_util.CameraItsError('Invalid response for command: %s' %
2329                                      cmd[_CMD_NAME_STR])
2330
2331  def set_audio_restriction(self, mode):
2332    """Set the audio restriction mode for this camera device.
2333
2334    Args:
2335     mode: int; the audio restriction mode. See CameraDevice.java for valid
2336     value.
2337    Returns:
2338     Nothing.
2339    """
2340    cmd = {}
2341    cmd[_CMD_NAME_STR] = 'setAudioRestriction'
2342    cmd['mode'] = mode
2343    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2344    data, _ = self.__read_response_from_socket()
2345    if data[_TAG_STR] != 'audioRestrictionSet':
2346      raise error_util.CameraItsError('Invalid response for command: %s' %
2347                                      cmd[_CMD_NAME_STR])
2348
2349  # pylint: disable=dangerous-default-value
2350  def do_3a(self,
2351            regions_ae=[[0, 0, 1, 1, 1]],
2352            regions_awb=[[0, 0, 1, 1, 1]],
2353            regions_af=[[0, 0, 1, 1, 1]],
2354            do_awb=True,
2355            do_af=True,
2356            lock_ae=False,
2357            lock_awb=False,
2358            get_results=False,
2359            ev_comp=0,
2360            auto_flash=False,
2361            mono_camera=False,
2362            zoom_ratio=None,
2363            out_surfaces=None,
2364            repeat_request=None,
2365            first_surface_for_3a=False,
2366            flash_mode=_FLASH_MODE_OFF):
2367    """Perform a 3A operation on the device.
2368
2369    Triggers some or all of AE, AWB, and AF, and returns once they have
2370    converged. Uses the vendor 3A that is implemented inside the HAL.
2371    Note: do_awb is always enabled regardless of do_awb flag
2372
2373    Throws an assertion if 3A fails to converge.
2374
2375    Args:
2376      regions_ae: List of weighted AE regions.
2377      regions_awb: List of weighted AWB regions.
2378      regions_af: List of weighted AF regions.
2379      do_awb: Wait for AWB to converge.
2380      do_af: Trigger AF and wait for it to converge.
2381      lock_ae: Request AE lock after convergence, and wait for it.
2382      lock_awb: Request AWB lock after convergence, and wait for it.
2383      get_results: Return the 3A results from this function.
2384      ev_comp: An EV compensation value to use when running AE.
2385      auto_flash: AE control boolean to enable auto flash.
2386      mono_camera: Boolean for monochrome camera.
2387      zoom_ratio: Zoom ratio. None if default zoom
2388      out_surfaces: dict; see do_capture() for specifications on out_surfaces.
2389        CameraCaptureSession will only be reused if out_surfaces is specified.
2390      repeat_request: repeating request list.
2391        See do_capture() for specifications on repeat_request.
2392      first_surface_for_3a: Use first surface in output_surfaces for 3A.
2393        Only applicable if out_surfaces contains at least 1 surface.
2394      flash_mode: FLASH_MODE to be used during 3A
2395        0: OFF
2396        1: SINGLE
2397        2: TORCH
2398
2399      Region format in args:
2400         Arguments are lists of weighted regions; each weighted region is a
2401         list of 5 values, [x, y, w, h, wgt], and each argument is a list of
2402         these 5-value lists. The coordinates are given as normalized
2403         rectangles (x, y, w, h) specifying the region. For example:
2404         [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
2405         Weights are non-negative integers.
2406
2407    Returns:
2408      Five values are returned if get_results is true:
2409      * AE sensitivity;
2410      * AE exposure time;
2411      * AWB gains (list);
2412      * AWB transform (list);
2413      * AF focus position; None if do_af is false
2414      Otherwise, it returns five None values.
2415    """
2416    logging.debug('Running vendor 3A on device')
2417    cmd = {}
2418    cmd[_CMD_NAME_STR] = 'do3A'
2419    reuse_session = False
2420    if out_surfaces:
2421      reuse_session = True
2422      if isinstance(out_surfaces, list):
2423        cmd['outputSurfaces'] = out_surfaces
2424      else:
2425        cmd['outputSurfaces'] = [out_surfaces]
2426    if repeat_request is None:
2427      cmd['repeatRequests'] = []
2428    elif not isinstance(repeat_request, list):
2429      cmd['repeatRequests'] = [repeat_request]
2430    else:
2431      cmd['repeatRequests'] = repeat_request
2432
2433    cmd['regions'] = {
2434        'ae': sum(regions_ae, []),
2435        'awb': sum(regions_awb, []),
2436        'af': sum(regions_af, [])
2437    }
2438    do_ae = True  # Always run AE
2439    cmd['triggers'] = {'ae': do_ae, 'af': do_af}
2440    if lock_ae:
2441      cmd['aeLock'] = True
2442    if lock_awb:
2443      cmd['awbLock'] = True
2444    if ev_comp != 0:
2445      cmd['evComp'] = ev_comp
2446    if flash_mode != 0:
2447      cmd['flashMode'] = flash_mode
2448    if auto_flash:
2449      cmd['autoFlash'] = True
2450    if self._hidden_physical_id:
2451      cmd['physicalId'] = self._hidden_physical_id
2452    if zoom_ratio:
2453      if self.zoom_ratio_within_range(zoom_ratio):
2454        cmd['zoomRatio'] = zoom_ratio
2455      else:
2456        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
2457    cmd['reuseSession'] = reuse_session
2458    cmd['firstSurfaceFor3A'] = first_surface_for_3a
2459    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2460
2461    # Wait for each specified 3A to converge.
2462    ae_sens = None
2463    ae_exp = None
2464    awb_gains = None
2465    awb_transform = None
2466    af_dist = None
2467    converged = False
2468    while True:
2469      data, _ = self.__read_response_from_socket()
2470      vals = data[_STR_VALUE_STR].split()
2471      if data[_TAG_STR] == 'aeResult':
2472        if do_ae:
2473          ae_sens, ae_exp = [int(i) for i in vals]
2474      elif data[_TAG_STR] == 'afResult':
2475        if do_af:
2476          af_dist = float(vals[0])
2477      elif data[_TAG_STR] == 'awbResult':
2478        awb_gains = [float(f) for f in vals[:4]]
2479        awb_transform = [float(f) for f in vals[4:]]
2480      elif data[_TAG_STR] == '3aConverged':
2481        converged = True
2482      elif data[_TAG_STR] == '3aDone':
2483        break
2484      else:
2485        raise error_util.CameraItsError('Invalid command response')
2486    if converged and not get_results:
2487      return None, None, None, None, None
2488    if (do_ae and ae_sens is None or
2489        (not mono_camera and do_awb and awb_gains is None) or
2490        do_af and af_dist is None or not converged):
2491      raise error_util.CameraItsError('3A failed to converge')
2492    return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
2493
2494  def calc_camera_fov(self, props):
2495    """Determine the camera field of view from internal params.
2496
2497    Args:
2498      props: Camera properties object.
2499
2500    Returns:
2501      camera_fov: string; field of view for camera.
2502    """
2503
2504    focal_ls = props['android.lens.info.availableFocalLengths']
2505    if len(focal_ls) > 1:
2506      logging.debug('Doing capture to determine logical camera focal length')
2507      cap = self.do_capture(capture_request_utils.auto_capture_request())
2508      focal_l = cap['metadata']['android.lens.focalLength']
2509    else:
2510      focal_l = focal_ls[0]
2511
2512    sensor_size = props['android.sensor.info.physicalSize']
2513    diag = math.sqrt(sensor_size['height']**2 + sensor_size['width']**2)
2514    try:
2515      fov = str(round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2))
2516    except ValueError:
2517      fov = str(0)
2518    logging.debug('Calculated FoV: %s', fov)
2519    return fov
2520
2521  def get_file_name_to_load(self, chart_distance, camera_fov, scene):
2522    """Get the image to load on the tablet depending on fov and chart_distance.
2523
2524    Args:
2525     chart_distance: float; distance in cm from camera of displayed chart
2526     camera_fov: float; camera field of view.
2527     scene: String; Scene to be used in the test.
2528
2529    Returns:
2530     file_name: file name to display on the tablet.
2531
2532    """
2533    chart_scaling = opencv_processing_utils.calc_chart_scaling(
2534        chart_distance, camera_fov)
2535    if chart_scaling:
2536      file_name = f'{scene}_{chart_scaling}x_scaled.png'
2537    else:
2538      file_name = f'{scene}.png'
2539    logging.debug('Scene to load: %s', file_name)
2540    return file_name
2541
2542  def is_stream_combination_supported(self, out_surfaces, settings=None):
2543    """Query whether out_surfaces combination and settings are supported by the camera device.
2544
2545    This function hooks up to the isSessionConfigurationSupported()/
2546    isSessionConfigurationWithSettingsSupported() camera API
2547    to query whether a particular stream combination and settings are supported.
2548
2549    Args:
2550      out_surfaces: dict; see do_capture() for specifications on out_surfaces.
2551      settings: dict; optional capture request settings metadata.
2552
2553    Returns:
2554      Boolean
2555    """
2556    cmd = {}
2557    cmd[_CMD_NAME_STR] = 'isStreamCombinationSupported'
2558    cmd[_CAMERA_ID_STR] = self._camera_id
2559
2560    if isinstance(out_surfaces, list):
2561      cmd['outputSurfaces'] = out_surfaces
2562      for out_surface in out_surfaces:
2563        if self._hidden_physical_id:
2564          out_surface['physicalCamera'] = self._hidden_physical_id
2565    else:
2566      cmd['outputSurfaces'] = [out_surfaces]
2567      if self._hidden_physical_id:
2568        out_surfaces['physicalCamera'] = self._hidden_physical_id
2569
2570    if settings:
2571      cmd['settings'] = settings
2572
2573    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2574
2575    data, _ = self.__read_response_from_socket()
2576    if data[_TAG_STR] != 'streamCombinationSupport':
2577      raise error_util.CameraItsError('Failed to query stream combination')
2578
2579    return data[_STR_VALUE_STR] == 'supportedCombination'
2580
2581  def is_camera_privacy_mode_supported(self):
2582    """Query whether the mobile device supports camera privacy mode.
2583
2584    This function checks whether the mobile device has FEATURE_CAMERA_TOGGLE
2585    feature support, which indicates the camera device can run in privacy mode.
2586
2587    Returns:
2588      Boolean
2589    """
2590    cmd = {}
2591    cmd[_CMD_NAME_STR] = 'isCameraPrivacyModeSupported'
2592    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2593
2594    data, _ = self.__read_response_from_socket()
2595    if data[_TAG_STR] != 'cameraPrivacyModeSupport':
2596      raise error_util.CameraItsError('Failed to query camera privacy mode'
2597                                      ' support')
2598    return data[_STR_VALUE_STR] == 'true'
2599
2600  def is_primary_camera(self):
2601    """Query whether the camera device is a primary rear/front camera.
2602
2603    A primary rear/front facing camera is a camera device with the lowest
2604    camera Id for that facing.
2605
2606    Returns:
2607      Boolean
2608    """
2609    cmd = {}
2610    cmd[_CMD_NAME_STR] = 'isPrimaryCamera'
2611    cmd[_CAMERA_ID_STR] = self._camera_id
2612    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2613
2614    data, _ = self.__read_response_from_socket()
2615    if data[_TAG_STR] != 'primaryCamera':
2616      raise error_util.CameraItsError('Failed to query primary camera')
2617    return data[_STR_VALUE_STR] == 'true'
2618
2619  def is_performance_class(self):
2620    """Query whether the mobile device is an R or S performance class device.
2621
2622    Returns:
2623      Boolean
2624    """
2625    cmd = {}
2626    cmd[_CMD_NAME_STR] = 'isPerformanceClass'
2627    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2628
2629    data, _ = self.__read_response_from_socket()
2630    if data[_TAG_STR] != 'performanceClass':
2631      raise error_util.CameraItsError('Failed to query performance class')
2632    return data[_STR_VALUE_STR] == 'true'
2633
2634  def is_vic_performance_class(self):
2635    """Return whether the mobile device is VIC performance class device.
2636    """
2637    cmd = {}
2638    cmd[_CMD_NAME_STR] = 'isVicPerformanceClass'
2639    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2640
2641    data, _ = self.__read_response_from_socket()
2642    if data[_TAG_STR] != 'vicPerformanceClass':
2643      raise error_util.CameraItsError('Failed to query performance class')
2644    return data[_STR_VALUE_STR] == 'true'
2645
2646  def measure_camera_launch_ms(self):
2647    """Measure camera launch latency in millisecond, from open to first frame.
2648
2649    Returns:
2650      Camera launch latency from camera open to receipt of first frame
2651    """
2652    cmd = {}
2653    cmd[_CMD_NAME_STR] = 'measureCameraLaunchMs'
2654    cmd[_CAMERA_ID_STR] = self._camera_id
2655    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2656
2657    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
2658    self.sock.settimeout(timeout)
2659    data, _ = self.__read_response_from_socket()
2660    self.sock.settimeout(self.SOCK_TIMEOUT)
2661
2662    if data[_TAG_STR] != 'cameraLaunchMs':
2663      raise error_util.CameraItsError('Failed to measure camera launch latency')
2664    return float(data[_STR_VALUE_STR])
2665
2666  def measure_camera_1080p_jpeg_capture_ms(self):
2667    """Measure camera 1080P jpeg capture latency in milliseconds.
2668
2669    Returns:
2670      Camera jpeg capture latency in milliseconds
2671    """
2672    cmd = {}
2673    cmd[_CMD_NAME_STR] = 'measureCamera1080pJpegCaptureMs'
2674    cmd[_CAMERA_ID_STR] = self._camera_id
2675    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2676
2677    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
2678    self.sock.settimeout(timeout)
2679    data, _ = self.__read_response_from_socket()
2680    self.sock.settimeout(self.SOCK_TIMEOUT)
2681
2682    if data[_TAG_STR] != 'camera1080pJpegCaptureMs':
2683      raise error_util.CameraItsError(
2684          'Failed to measure camera 1080p jpeg capture latency')
2685    return float(data[_STR_VALUE_STR])
2686
2687  def _camera_id_to_props(self):
2688    """Return the properties of each camera ID."""
2689    unparsed_ids = self.get_camera_ids().get('cameraIdArray', [])
2690    parsed_ids = parse_camera_ids(unparsed_ids)
2691    id_to_props = {}
2692    for unparsed_id, id_combo in zip(unparsed_ids, parsed_ids):
2693      if id_combo.sub_id is None:
2694        props = self.get_camera_properties_by_id(id_combo.id)
2695      else:
2696        props = self.get_camera_properties_by_id(id_combo.sub_id)
2697      id_to_props[unparsed_id] = props
2698    if not id_to_props:
2699      raise AssertionError('No camera IDs were found.')
2700    return id_to_props
2701
2702  def get_primary_camera_id(self, facing):
2703    """Return the primary camera ID facing the given direction."""
2704    camera_ids = self.get_camera_ids()
2705    primary_rear_camera_id = camera_ids.get('primaryRearCameraId', '')
2706    primary_front_camera_id = camera_ids.get('primaryFrontCameraId', '')
2707    if facing == camera_properties_utils.LENS_FACING['BACK']:
2708      return primary_rear_camera_id
2709    elif facing == camera_properties_utils.LENS_FACING['FRONT']:
2710      return primary_front_camera_id
2711    else:
2712      raise NotImplementedError('Cameras not facing either front or back '
2713                                'are currently unsupported.')
2714
2715  def _get_id_to_fov_facing(self):
2716    """Return the FoV and facing of each camera ID.
2717
2718    Returns:
2719      A dictionary mapping camera IDs to a namedtuple containing the camera's
2720      field of view and facing.
2721    """
2722    id_to_props = self._camera_id_to_props()
2723    fov_and_facing = collections.namedtuple('FovAndFacing', ['fov', 'facing'])
2724    id_to_fov_facing = {
2725        unparsed_id: fov_and_facing(
2726            self.calc_camera_fov(props), props['android.lens.facing']
2727        )
2728        for unparsed_id, props in id_to_props.items()
2729    }
2730    logging.debug('IDs to (FOVs, facing): %s', id_to_fov_facing)
2731    return id_to_fov_facing
2732
2733  def _has_physical_camera_with_different_fov(
2734      self, facing, is_fov_beyond_threshold, camera_type='physical'):
2735    """Return if device has a physical camera with different FoV than primary.
2736
2737    Args:
2738      facing: constant describing the direction the camera device lens faces.
2739      is_fov_beyond_threshold: Callable that compares the FoV of a physical
2740        camera against an FoV threshold.
2741      camera_type: Optional[string]; description of the FoV of the camera.
2742    Returns:
2743      True if the device has a physical camera with different FoV than primary.
2744    """
2745    primary_camera_id = self.get_primary_camera_id(facing)
2746    id_to_fov_facing = self._get_id_to_fov_facing()
2747    primary_camera_fov, primary_camera_facing = id_to_fov_facing[
2748        primary_camera_id]
2749    for unparsed_id, fov_facing_combo in id_to_fov_facing.items():
2750      if (is_fov_beyond_threshold(float(fov_facing_combo.fov)) and
2751          fov_facing_combo.facing == primary_camera_facing and
2752          unparsed_id != primary_camera_id):
2753        logging.debug('Found %s camera with ID %s and FoV %.3f. '
2754                      'Primary camera has ID %s and FoV: %.3f.',
2755                      camera_type,
2756                      unparsed_id, float(fov_facing_combo.fov),
2757                      primary_camera_id, float(primary_camera_fov))
2758        return True
2759    return False
2760
2761  def has_ultrawide_camera(self, facing):
2762    """Return if device has an ultrawide camera facing the same direction.
2763
2764    Args:
2765      facing: constant describing the direction the camera device lens faces.
2766    Returns:
2767      True if the device has an ultrawide camera facing in that direction.
2768    """
2769    return self._has_physical_camera_with_different_fov(
2770        facing,
2771        lambda fov: fov >= opencv_processing_utils.FOV_THRESH_UW,
2772        camera_type=CAMERA_TYPE_ULTRAWIDE)
2773
2774  def has_tele_camera(self, facing):
2775    """Return if device has a telephoto camera facing the same direction.
2776
2777    Args:
2778      facing: constant describing the direction the camera device lens faces.
2779    Returns:
2780      True if the device has a telephoto camera facing in that direction.
2781    """
2782    return self._has_physical_camera_with_different_fov(
2783        facing,
2784        lambda fov: fov <= opencv_processing_utils.FOV_THRESH_TELE,
2785        camera_type=CAMERA_TYPE_TELE)
2786
2787  def get_camera_type(self, props):
2788    """Return if a camera is a tele, wide or ultrawide camera.
2789
2790    Args:
2791      props: Camera properties object.
2792    Returns:
2793      camera_type: str; telephoto, wide or ultrawide.
2794    """
2795    camera_type = CAMERA_TYPE_ULTRAWIDE
2796    fov = float(self.calc_camera_fov(props))
2797    logging.debug('Camera FoV: %s', fov)
2798    if fov < opencv_processing_utils.FOV_THRESH_TELE:
2799      camera_type = CAMERA_TYPE_TELE
2800    elif fov < opencv_processing_utils.FOV_THRESH_UW:
2801      camera_type = CAMERA_TYPE_WIDE
2802    logging.debug('Camera type: %s', camera_type)
2803    return camera_type
2804
2805  def get_facing_to_ids(self):
2806    """Returns mapping from lens facing to list of corresponding camera IDs."""
2807    id_to_props = self._camera_id_to_props()
2808    facing_to_ids = collections.defaultdict(list)
2809    for unparsed_id, props in id_to_props.items():
2810      facing_to_ids[props['android.lens.facing']].append(unparsed_id)
2811    for ids in facing_to_ids.values():
2812      ids.sort()
2813    logging.debug('Facing to camera IDs: %s', facing_to_ids)
2814    return facing_to_ids
2815
2816  def is_low_light_boost_available(self, camera_id, extension=-1):
2817    """Checks if low light boost is available for camera id and extension.
2818
2819    If the extension is not provided (or -1) then low light boost support is
2820    checked for a camera2 session.
2821
2822    Args:
2823      camera_id: int; device ID
2824      extension: int; extension type
2825    Returns:
2826      True if low light boost is available and false otherwise.
2827    """
2828    cmd = {
2829        'cmdName': 'isLowLightBoostAvailable',
2830        'cameraId': camera_id,
2831        'extension': extension
2832    }
2833    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2834    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
2835    self.sock.settimeout(timeout)
2836    data, _ = self.__read_response_from_socket()
2837    if data['tag'] != 'isLowLightBoostAvailable':
2838      raise error_util.CameraItsError('Invalid command response')
2839    return data[_STR_VALUE_STR] == 'true'
2840
2841  def is_night_mode_indicator_supported(self, camera_id):
2842    """Checks if night mode indicator is supported for camera id.
2843
2844    If night mode camera extension is supported by the device and the device
2845    supports the night mode indicator for both Camera2 and CameraExtension, then
2846    this function returns True.
2847
2848    Args:
2849      camera_id: int; device ID
2850    Returns:
2851      True if night mode indicator is supported and false otherwise.
2852    """
2853    cmd = {
2854        'cmdName': 'isNightModeIndicatorSupported',
2855        'cameraId': camera_id,
2856    }
2857    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2858    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
2859    self.sock.settimeout(timeout)
2860    data, _ = self.__read_response_from_socket()
2861    if data['tag'] != 'isNightModeIndicatorSupported':
2862      raise error_util.CameraItsError('Invalid command response')
2863    return data[_STR_VALUE_STR] == 'true'
2864
2865  def do_capture_preview_frame(self,
2866                               camera_id,
2867                               preview_size,
2868                               frame_num=0,
2869                               extension=-1,
2870                               cap_request={}):
2871    """Captures the nth preview frame from the preview stream.
2872
2873    By default the 0th frame is the first frame. The extension type can also be
2874    provided or -1 to use Camera2 which is the default.
2875
2876    Args:
2877      camera_id: int; device ID
2878      preview_size: int; preview size
2879      frame_num: int; frame number to capture
2880      extension: int; extension type
2881      cap_request: dict; python dict specifying the key/value pair of capture
2882        request keys, which will be converted to JSON and sent to the device.
2883    Returns:
2884      Tuple of capture result camera metadata and single JPEG frame capture as
2885      numpy array of bytes
2886    """
2887    cmd = {
2888        'cmdName': 'doCapturePreviewFrame',
2889        'cameraId': camera_id,
2890        'previewSize': preview_size,
2891        'frameNum': frame_num,
2892        'extension': extension,
2893        'captureRequest': cap_request,
2894    }
2895    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2896    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
2897    self.sock.settimeout(timeout)
2898    capture_result_metadata = None
2899    jpeg_image = None
2900    data, _ = self.__read_response_from_socket()
2901    if data[_TAG_STR] == 'captureResults':
2902      capture_result_metadata = data[_OBJ_VALUE_STR]['captureResult']
2903
2904    data, buf = self.__read_response_from_socket()
2905    if data[_TAG_STR] == 'jpegImage':
2906      jpeg_image = buf
2907    return capture_result_metadata, jpeg_image
2908
2909  def preview_surface(self, size, hlg10_enabled=False):
2910    """Create a surface dictionary based on size and hdr-ness.
2911
2912    Args:
2913      size: str, Resolution of an output surface. ex. "1920x1080"
2914      hlg10_enabled: boolean; Whether the output is hlg10 or not.
2915
2916    Returns:
2917      a dictionary object containing format, size, and hdr-ness.
2918    """
2919    surface = {
2920        'format': 'priv',
2921        'width': int(size.split('x')[0]),
2922        'height': int(size.split('x')[1]),
2923        'hlg10': hlg10_enabled
2924    }
2925    if self._hidden_physical_id:
2926      surface['physicalCamera'] = self._hidden_physical_id
2927    return [surface]
2928
2929
def parse_camera_ids(ids):
  """Parse the string of camera IDs into array of CameraIdCombo tuples.

  Args:
   ids: List of camera ids.

  Returns:
   Array of CameraIdCombo
  """
  camera_id_combo = collections.namedtuple('CameraIdCombo', ['id', 'sub_id'])
  id_combos = []
  for one_id in ids:
    parts = one_id.split(SUB_CAMERA_SEPARATOR)
    if len(parts) > 2:
      raise AssertionError('Camera id parameters must be either ID or '
                           f'ID{SUB_CAMERA_SEPARATOR}SUB_ID')
    # A lone ID gets sub_id=None; "ID.SUB_ID" keeps both parts.
    sub_id = parts[1] if len(parts) == 2 else None
    id_combos.append(camera_id_combo(parts[0], sub_id))
  return id_combos
2951
2952
def do_capture_with_latency(cam, req, sync_latency, fmt=None):
  """Helper function to take enough frames to allow sync latency.

  Args:
    cam: camera object
    req: request for camera
    sync_latency: integer number of frames
    fmt: format for the capture
  Returns:
    single capture with the unsettled frames discarded
  """
  # Capture sync_latency+1 frames and keep only the last, settled one.
  num_frames = sync_latency + 1
  captures = cam.do_capture([req] * num_frames, fmt)
  return captures[-1]
2966
2967
def load_scene(cam, props, scene, tablet, chart_distance, lighting_check=True,
               log_path=None):
  """Load the scene for the camera based on the FOV.

  Args:
    cam: camera object
    props: camera properties
    scene: scene to be loaded
    tablet: tablet to load scene on
    chart_distance: distance to tablet
    lighting_check: Boolean for lighting check enabled
    log_path: [Optional] path to store artifacts
  """
  if not tablet:
    logging.info('Manual run: no tablet to load scene on.')
    return
  # The camera FoV determines which (possibly scaled) media file is shown.
  camera_fov = cam.calc_camera_fov(props)
  file_name = cam.get_file_name_to_load(chart_distance, camera_fov, scene)
  if 'scene' not in file_name:
    file_name = f'scene{file_name}'
  is_video_scene = scene in VIDEO_SCENES
  if is_video_scene:
    file_name = os.path.splitext(file_name)[0] + '.mp4'
  logging.debug('Displaying %s on the tablet', file_name)

  # Display the image/video on the tablet using the default media player.
  if is_video_scene:
    view_file_type = 'video/mp4'
    uri_prefix = ''
  else:
    view_file_type = 'image/png'
    uri_prefix = 'file://mnt'
  tablet.adb.shell(
      f'am start -a android.intent.action.VIEW -t {view_file_type} '
      f'-d {uri_prefix}/sdcard/Download/{file_name}')
  time.sleep(LOAD_SCENE_DELAY_SEC)
  # Tap tablet to remove gallery buttons
  tablet.adb.shell(
      f'input tap {TAP_COORDINATES[0]} {TAP_COORDINATES[1]}')
  fov_value = float(camera_fov)
  rfov_camera_in_rfov_box = (
      math.isclose(
          chart_distance,
          opencv_processing_utils.CHART_DISTANCE_31CM, rel_tol=0.1) and
      opencv_processing_utils.FOV_THRESH_TELE <= fov_value
      <= opencv_processing_utils.FOV_THRESH_UW)
  wfov_camera_in_wfov_box = (
      math.isclose(
          chart_distance,
          opencv_processing_utils.CHART_DISTANCE_22CM, rel_tol=0.1) and
      fov_value > opencv_processing_utils.FOV_THRESH_UW)
  if lighting_check and (rfov_camera_in_rfov_box or wfov_camera_in_wfov_box):
    cam.do_3a()
    cap = cam.do_capture(
        capture_request_utils.auto_capture_request(), cam.CAP_YUV)
    y_plane, _, _ = image_processing_utils.convert_capture_to_planes(cap)
    validate_lighting(y_plane, scene, log_path=log_path, fov=fov_value)
3021
3022
def copy_scenes_to_tablet(scene, tablet_id):
  """Copies scenes onto the tablet before running the tests.

  Args:
    scene: Name of the scene to copy image files.
    tablet_id: device id of tablet
  """
  logging.info('Copying files to tablet: %s', tablet_id)
  scene_path = os.path.join(os.environ['CAMERA_ITS_TOP'], 'tests', scene)
  push_procs = []
  for file_name in os.listdir(scene_path):
    if file_name.endswith(('.png', '.mp4')):
      src_scene_file = os.path.join(scene_path, file_name)
      cmd = f'adb -s {tablet_id} push {src_scene_file} {_DST_SCENE_DIR}'
      push_procs.append(subprocess.Popen(cmd.split()))
  # Wait for every push to finish so files are fully copied before the
  # tests start; previously the pushes were fire-and-forget and relied
  # solely on the sleep below.
  for proc in push_procs:
    proc.wait()
  time.sleep(_COPY_SCENE_DELAY_SEC)
  logging.info('Finished copying files to tablet.')
3040
3041
def validate_lighting(y_plane, scene, state='ON', log_path=None,
                      tablet_state='ON', fov=None):
  """Validates the lighting level in scene corners based on empirical values.

  Compares the mean Y (luma) of a small patch sampled from a corner region
  of the scene against an empirical threshold to decide whether the rig
  lighting matches the expected ``state``. On a mismatch the Y plane is
  written out as a debug image before raising.

  NOTE(review): the loop below returns or raises on its very first
  iteration, so despite the "each corner" comment only ONE corner region
  is ever evaluated — confirm whether checking all corners was intended.

  Args:
    y_plane: Y plane of YUV image
    scene: scene name
    state: string 'ON' or 'OFF'
    log_path: [Optional] path to store artifacts
    tablet_state: string 'ON' or 'OFF'
    fov: [Optional] float, calculated camera FoV

  Returns:
    boolean True if lighting validated, else raise AssertionError
  """
  logging.debug('Validating lighting levels.')
  file_name = f'validate_lighting_{scene}.jpg'
  if log_path:
    file_name = os.path.join(log_path, f'validate_lighting_{scene}.jpg')

  # A dark tablet makes the whole scene dimmer, so a lower threshold is
  # used to judge "lights on".
  if tablet_state == 'OFF':
    validate_lighting_thresh = _VALIDATE_LIGHTING_THRESH_DARK
  else:
    validate_lighting_thresh = _VALIDATE_LIGHTING_THRESH

  # Wide-FoV (modular UW) cameras sample different corner regions.
  validate_lighting_regions = _VALIDATE_LIGHTING_REGIONS
  if fov and fov > _VALIDATE_LIGHTING_MACRO_FOV_THRESH:
    validate_lighting_regions = _VALIDATE_LIGHTING_REGIONS_MODULAR_UW

  # Test patches from each corner.
  for location, coordinates in validate_lighting_regions.items():
    patch = image_processing_utils.get_image_patch(
        y_plane, coordinates[0], coordinates[1],
        _VALIDATE_LIGHTING_PATCH_W, _VALIDATE_LIGHTING_PATCH_H)
    y_mean = image_processing_utils.compute_image_means(patch)[0]
    logging.debug('%s corner Y mean: %.3f', location, y_mean)
    if state == 'ON':
      if y_mean > validate_lighting_thresh:
        logging.debug('Lights ON in test rig.')
        return True
      else:
        # Save the offending frame for debugging before failing.
        image_processing_utils.write_image(y_plane, file_name)
        raise AssertionError('Lights OFF in test rig. Turn ON and retry.')
    elif state == 'OFF':
      if y_mean < validate_lighting_thresh:
        logging.debug('Lights OFF in test rig.')
        return True
      else:
        # Save the offending frame for debugging before failing.
        image_processing_utils.write_image(y_plane, file_name)
        raise AssertionError('Lights ON in test rig. Turn OFF and retry.')
    else:
      raise AssertionError('Invalid lighting state string. '
                           "Valid strings: 'ON', 'OFF'.")
3095
3096
def get_build_fingerprint(device_id):
  """Returns the build fingerprint string of the device.

  Args:
    device_id: str; serial of the device under test.

  Returns:
    str, value of the ro.build.fingerprint property.

  Raises:
    AssertionError: if the property cannot be read.
  """
  cmd = f'adb -s {device_id} shell getprop ro.build.fingerprint'
  try:
    raw_output = subprocess.check_output(cmd.split())
    build_fingerprint = raw_output.decode('utf-8').strip()
  except (subprocess.CalledProcessError, ValueError) as exp_errors:
    raise AssertionError('No build_fingerprint.') from exp_errors
  logging.debug('Build fingerprint: %s', build_fingerprint)
  return build_fingerprint
3106
3107
def get_build_sdk_version(device_id):
  """Returns the int build SDK version of the device.

  Args:
    device_id: str; serial of the device under test.

  Returns:
    int, value of the ro.build.version.sdk property.

  Raises:
    AssertionError: if the property cannot be read or parsed as an int.
  """
  cmd = f'adb -s {device_id} shell getprop ro.build.version.sdk'
  try:
    raw_output = subprocess.check_output(cmd.split())
    build_sdk_version = int(raw_output.rstrip())
  except (subprocess.CalledProcessError, ValueError) as exp_errors:
    raise AssertionError('No build_sdk_version.') from exp_errors
  logging.debug('Build SDK version: %d', build_sdk_version)
  return build_sdk_version
3117
3118
def get_first_api_level(device_id):
  """Returns the int first API level of the device.

  Falls back to the build SDK version when the property cannot be read
  or parsed.

  Args:
    device_id: str; serial of the device under test.

  Returns:
    int, value of ro.product.first_api_level, or the build SDK version
    on failure.
  """
  cmd = f'adb -s {device_id} shell getprop ro.product.first_api_level'
  try:
    first_api_level = int(subprocess.check_output(cmd.split()).rstrip())
  except (subprocess.CalledProcessError, ValueError):
    logging.error('No first_api_level. Setting to build version.')
    return get_build_sdk_version(device_id)
  logging.debug('First API level: %d', first_api_level)
  return first_api_level
3129
3130
def get_vendor_api_level(device_id):
  """Returns the int vendor API level of the device.

  Falls back to the build SDK version when the property cannot be read
  or parsed.

  Args:
    device_id: str; serial of the device under test.

  Returns:
    int, value of ro.vendor.api_level, or the build SDK version on
    failure.
  """
  cmd = f'adb -s {device_id} shell getprop ro.vendor.api_level'
  try:
    vendor_api_level = int(subprocess.check_output(cmd.split()).rstrip())
    # Fixed copy-paste log text: this reads the vendor API level, not the
    # first API level.
    logging.debug('Vendor API level: %d', vendor_api_level)
  except (subprocess.CalledProcessError, ValueError):
    logging.error('No vendor_api_level. Setting to build version.')
    vendor_api_level = get_build_sdk_version(device_id)
  return vendor_api_level
3141
3142
def get_media_performance_class(device_id):
  """Returns the int media performance class (MPC) of the device.

  Devices that do not declare an MPC (property missing or unparseable)
  report 0.

  Args:
    device_id: str; serial of the device under test.

  Returns:
    int, value of ro.odm.build.media_performance_class, or 0 on failure.
  """
  cmd = (f'adb -s {device_id} shell '
         'getprop ro.odm.build.media_performance_class')
  try:
    mpc = int(subprocess.check_output(cmd.split()).rstrip())
  except (subprocess.CalledProcessError, ValueError):
    logging.debug('No media performance class. Setting to 0.')
    return 0
  logging.debug('Media performance class: %d', mpc)
  return mpc
3155
3156
def raise_mpc_assertion_error(required_mpc, test_name, found_mpc):
  """Raises an AssertionError for an unmet media performance class requirement.

  Args:
    required_mpc: int; MPC level at or above which the test is mandatory.
    test_name: str; name of the test that must be run.
    found_mpc: int; MPC level reported by the device.

  Raises:
    AssertionError: always.
  """
  msg = (f'With MPC >= {required_mpc}, {test_name} must be run. '
         f'Found MPC: {found_mpc}')
  raise AssertionError(msg)
3160
3161
def stop_video_playback(tablet):
  """Force-stops recent activities used for video playback on the tablet.

  Best-effort cleanup: ADB failures are logged with a request to close
  the player manually instead of being raised.

  Args:
    tablet: a controller object for the ITS tablet.
  """
  dump_cmd = ['dumpsys', 'activity', 'recents', '|',
              'grep', '"baseIntent=Intent.*act=android.intent.action"']
  try:
    raw_activities = tablet.adb.shell(dump_cmd)
  except adb.AdbError as e:
    logging.warning('ADB error when finding intent activities: %s. '
                    'Please close the default video player manually.', e)
    return
  activity_lines = str(raw_activities.decode('utf-8')).strip().splitlines()
  for line in activity_lines:
    # Component name sits between 'cmp=' and the '/' activity separator.
    component = line.split('cmp=')[-1].split('/')[0]
    try:
      tablet.adb.shell(['am', 'force-stop', component])
    except adb.AdbError as e:
      logging.warning('ADB error when killing intent activity %s: %s. '
                      'Please close the default video player manually.',
                      component, e)
3188
3189
def raise_not_yet_mandated_error(message, api_level, mandated_api_level):
  """Raises an AssertionError describing a not-yet-mandated test failure.

  Args:
    message: str; failure detail appended to the error text.
    api_level: int; API level of the device under test.
    mandated_api_level: int; API level at which the test becomes mandatory.

  Raises:
    AssertionError: always; text depends on whether the test is mandated
      at the device's API level.
  """
  if api_level < mandated_api_level:
    raise AssertionError(f'{NOT_YET_MANDATED_MESSAGE}\n\n{message}')
  raise AssertionError(
      f'Test is mandated for API level {mandated_api_level} or above. '
      f'Found API level {api_level}.\n\n{message}'
  )
3198
3199
def pull_file_from_dut(dut, dut_path, log_folder):
  """Pulls a file from the DUT into log_folder and returns its name.

  Args:
    dut: device under test
    dut_path: pull file from this path
    log_folder: store pulled file to this folder

  Returns:
    filename of file pulled from dut
  """
  dut.adb.pull([dut_path, log_folder])
  pulled_name = dut_path.rsplit('/', 1)[-1]
  logging.debug('%s pulled from dut', pulled_name)
  return pulled_name
3215
3216
def remove_tmp_files(log_path, match_pattern):
  """Removes temp files matching a pattern from the given directory.

  Best-effort cleanup: a missing directory or an already-removed file is
  logged at debug level rather than raised. (Docstring fixed: the
  previous version claimed a list of error messages was returned, but
  the function returns None.)

  Args:
    log_path: path-like object, path of directory to clean.
    match_pattern: string, fnmatch-style pattern of files to remove.
  """
  temp_files = []
  try:
    temp_files = os.listdir(log_path)
  except FileNotFoundError:
    logging.debug('/tmp directory: %s not found', log_path)
  for file in temp_files:
    if fnmatch.fnmatch(file, match_pattern):
      file_to_remove = os.path.join(log_path, file)
      try:
        os.remove(file_to_remove)
      except FileNotFoundError:
        logging.debug('File not found: %s', str(file))
3239
3240
def remove_frame_files(dir_name, save_files_list=None):
  """Deletes generated PNG frame files from a test directory.

  Args:
    dir_name: test directory name.
    save_files_list: list of files not to be removed. Default is empty list.
  """
  if not os.path.exists(dir_name):
    return
  keep = save_files_list or []
  for frame_path in glob.glob('%s/*.png' % dir_name):
    if frame_path not in keep:
      os.remove(frame_path)
3252
3253
def remove_file(file_name_with_path):
  """Deletes the file at the given path, tolerating a missing file.

  Thin alias over remove_mp4_file, which works for any file type.

  Args:
    file_name_with_path: string, filename with path.
  """
  remove_mp4_file(file_name_with_path)
3261
3262
def remove_mp4_file(file_name_with_path):
  """Deletes the mp4 recording at the given path.

  A missing file is logged at debug level rather than raised, since
  cleanup is best-effort.

  Args:
    file_name_with_path: string, path to mp4 recording.
  """
  try:
    os.remove(file_name_with_path)
  except FileNotFoundError:
    logging.debug('File not found: %s', file_name_with_path)
3273
3274
def check_features_passed(
    features_passed, streams_name, fps_range_tuple,
    hlg10, is_stabilized):
  """Returns whether [hlg10, is_stabilized] is covered by a passed combo.

  The combination is considered already tested when some previously
  passed feature bitmask under the same streams/fps keys is a superset
  of the requested one.

  Args:
    features_passed: The 2d dictionary of feature combinations already passed
    streams_name: The first key for features_passed dictionary
    fps_range_tuple: The second key for features_passed dictionary
    hlg10: boolean; Whether HLG10 is enabled
    is_stabilized: boolean; Whether preview stabilization is enabled

  Returns:
    Whether the [hlg10, is_stabilized] is already tested to be supported.
  """
  feature_mask = 0
  if hlg10:
    feature_mask |= _BIT_HLG10
  if is_stabilized:
    feature_mask |= _BIT_STABILIZATION
  passed_masks = features_passed.get(streams_name, {}).get(fps_range_tuple, ())
  # Superset check: OR-ing the requested mask into a passed mask changes
  # nothing iff every requested bit was already set.
  return any((passed | feature_mask) == passed for passed in passed_masks)
3303
3304
def mark_features_passed(
    features_passed, streams_name, fps_range_tuple,
    hlg10, is_stabilized):
  """Records the [hlg10, is_stabilized] combination as tested and passing.

  Encodes the two feature flags as a bitmask and appends it to the list
  at features_passed[streams_name][fps_range_tuple], creating the
  intermediate containers when absent.

  Args:
    features_passed: The 2d dictionary of feature combinations already passed
    streams_name: The first key for features_passed dictionary
    fps_range_tuple: The second key for feature_passed dictionary
    hlg10: boolean; Whether HLG10 is enabled
    is_stabilized: boolean; Whether preview stabilization is enabled
  """
  feature_mask = 0
  if hlg10:
    feature_mask |= _BIT_HLG10
  if is_stabilized:
    feature_mask |= _BIT_STABILIZATION
  features_passed.setdefault(streams_name, {}).setdefault(
      fps_range_tuple, []).append(feature_mask)
3325
3326
def define_raw_stats_fmt_sensor_sensitivity(props, img_stats_grid):
  """Builds a rawStats format sized from the pre-correction active array.

  Args:
    props: camera properties dict containing
      android.sensor.info.preCorrectionActiveArraySize.
    img_stats_grid: int, number of grid cells along each dimension.

  Returns:
    dict, rawStats capture format for sensor sensitivity testing.
  """
  array_rect = props['android.sensor.info.preCorrectionActiveArraySize']
  aa_width = array_rect['right'] - array_rect['left']
  aa_height = array_rect['bottom'] - array_rect['top']
  logging.debug('Active array W,H: %d,%d', aa_width, aa_height)
  return {'format': 'rawStats',
          'gridWidth': aa_width // img_stats_grid,
          'gridHeight': aa_height // img_stats_grid}
3337
3338
def define_raw_stats_fmt_exposure(props, img_stats_grid):
  """Builds a rawStats format sized from the pre-correction active array.

  Args:
    props: camera properties dict containing
      android.sensor.info.preCorrectionActiveArraySize.
    img_stats_grid: int, number of grid cells along each dimension.

  Returns:
    dict, rawStats capture format for exposure testing.
  """
  array_rect = props['android.sensor.info.preCorrectionActiveArraySize']
  aa_width = array_rect['right'] - array_rect['left']
  aa_height = array_rect['bottom'] - array_rect['top']
  return {'format': 'rawStats',
          'gridWidth': aa_width // img_stats_grid,
          'gridHeight': aa_height // img_stats_grid}
3348