• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2013 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14"""Utility functions to form an ItsSession and perform various camera actions.
15"""
16
17
18import collections
19import json
20import logging
21import math
22import os
23import socket
24import subprocess
25import sys
26import time
27import unicodedata
28
29import numpy
30
31import camera_properties_utils
32import capture_request_utils
33import error_util
34import image_processing_utils
35import opencv_processing_utils
36
# Android API levels used for feature gating in tests.
ANDROID13_API_LEVEL = 33
ANDROID14_API_LEVEL = 34
# Sentinel: chart distance value meaning "do not scale chart for distance".
CHART_DISTANCE_NO_SCALING = 0
# Seconds to wait after asking the tablet to load a scene.
LOAD_SCENE_DELAY_SEC = 3
# Absolute tolerance when comparing a scaling factor against the file value.
SCALING_TO_FILE_ATOL = 0.01
# Number of captures for a single-capture request.
SINGLE_CAPTURE_NCAP = 1
# Separator used in "logical.physical" camera id strings.
SUB_CAMERA_SEPARATOR = '.'
DEFAULT_TABLET_BRIGHTNESS = 192  # 8-bit tablet 75% brightness
ELEVEN_BIT_TABLET_BRIGHTNESS = 1024  # 50% brightness for Xiaomi tablets
# Tablet product names (ro.build.product) with 11-bit brightness scales.
ELEVEN_BIT_TABLET_NAMES = ('nabu',)
# Legacy tablet with its own documented brightness requirement.
LEGACY_TABLET_BRIGHTNESS = 96
LEGACY_TABLET_NAME = 'dragon'
TABLET_REQUIREMENTS_URL = 'https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-requirements'
BRIGHTNESS_ERROR_MSG = ('Tablet brightness not set as per '
                        f'{TABLET_REQUIREMENTS_URL} in the config file')

# Corner patches sampled by validate_lighting; sizes are fractions of the
# image, offsets are (y, x) fractions of the top-left corner of each patch.
_VALIDATE_LIGHTING_PATCH_H = 0.05
_VALIDATE_LIGHTING_PATCH_W = 0.05
_VALIDATE_LIGHTING_REGIONS = {
    'top-left': (0, 0),
    'top-right': (0, 1-_VALIDATE_LIGHTING_PATCH_H),
    'bottom-left': (1-_VALIDATE_LIGHTING_PATCH_W, 0),
    'bottom-right': (1-_VALIDATE_LIGHTING_PATCH_W,
                     1-_VALIDATE_LIGHTING_PATCH_H),
}
_VALIDATE_LIGHTING_THRESH = 0.05  # Determined empirically from scene[1:6] tests
# JSON keys of the ItsService socket protocol messages.
_CMD_NAME_STR = 'cmdName'
_OBJ_VALUE_STR = 'objValue'
_STR_VALUE = 'strValue'
_TAG_STR = 'tag'
_CAMERA_ID_STR = 'cameraId'
# CaptureRequest stream use case value for cropped RAW captures.
_USE_CASE_CROPPED_RAW = 6
69
70
def validate_tablet_brightness(tablet_name, brightness):
  """Ensures tablet brightness is set according to documentation.

  https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-requirements
  Args:
    tablet_name: tablet product name specified by `ro.build.product`.
    brightness: brightness specified by config file.
  """
  # Tablets with non-default requirements; any other tablet must use the
  # default 8-bit brightness.
  expected_brightness = dict.fromkeys(
      ELEVEN_BIT_TABLET_NAMES, ELEVEN_BIT_TABLET_BRIGHTNESS)
  expected_brightness[LEGACY_TABLET_NAME] = LEGACY_TABLET_BRIGHTNESS
  required = expected_brightness.get(tablet_name, DEFAULT_TABLET_BRIGHTNESS)
  if brightness != required:
    raise AssertionError(BRIGHTNESS_ERROR_MSG)
90
91
class ItsSession(object):
  """Controls a device over adb to run ITS scripts.

    The script importing this module (on the host machine) prepares JSON
    objects encoding CaptureRequests, specifying sets of parameters to use
    when capturing an image using the Camera2 APIs. This class encapsulates
    sending the requests to the device, monitoring the device's progress, and
    copying the resultant captures back to the host machine when done. TCP
    forwarded over adb is the transport mechanism used.

    The device must have CtsVerifier.apk installed.

    Attributes:
        sock: The open socket.
  """

  # Open a connection to localhost:<host_port>, forwarded to port 6000 on the
  # device. <host_port> is determined at run-time to support multiple
  # connected devices.
  IPADDR = '127.0.0.1'
  REMOTE_PORT = 6000
  BUFFER_SIZE = 4096

  # LOCK_PORT is used as a mutex lock to protect the list of forwarded ports
  # among all processes. The script assumes LOCK_PORT is available and will
  # try to use ports between CLIENT_PORT_START and
  # CLIENT_PORT_START+MAX_NUM_PORTS-1 on host for ITS sessions.
  CLIENT_PORT_START = 6000
  MAX_NUM_PORTS = 100
  LOCK_PORT = CLIENT_PORT_START + MAX_NUM_PORTS

  # Seconds timeout on each socket operation.
  SOCK_TIMEOUT = 20.0
  # Seconds timeout on performance measurement socket operation
  SOCK_TIMEOUT_FOR_PERF_MEASURE = 40.0

  # Additional timeout in seconds when ITS service is doing more complicated
  # operations, for example: issuing warmup requests before actual capture.
  EXTRA_SOCK_TIMEOUT = 5.0

  # CtsVerifier package and the intent that starts ItsService on the device.
  PACKAGE = 'com.android.cts.verifier.camera.its'
  INTENT_START = 'com.android.cts.verifier.camera.its.START'

  # This string must be in sync with ItsService. Updated when interface
  # between script and ItsService is changed.
  ITS_SERVICE_VERSION = '1.0'

  SEC_TO_NSEC = 1000*1000*1000.0
  # Default adb invocation; __init__ overrides it with 'adb -s <serial>'.
  adb = 'adb -d'

  # Predefine camera props. Save props extracted from the function,
  # "get_camera_properties".
  props = None

  # Surface keys a capture result may carry images under (with/without DNG/YUV).
  IMAGE_FORMAT_LIST_1 = [
      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
      'dngImage', 'y8Image', 'jpeg_rImage'
  ]

  IMAGE_FORMAT_LIST_2 = [
      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
      'yuvImage', 'jpeg_rImage'
  ]

  # Canned single-surface capture request output specifications.
  CAP_JPEG = {'format': 'jpeg'}
  CAP_RAW = {'format': 'raw'}
  CAP_CROPPED_RAW = {'format': 'raw', 'useCase': _USE_CASE_CROPPED_RAW}
  CAP_YUV = {'format': 'yuv'}
  CAP_RAW_YUV = [{'format': 'raw'}, {'format': 'yuv'}]
161
  def __init_socket_port(self):
    """Initialize the socket port for the host to forward requests to the device.

    This method assumes localhost's LOCK_PORT is available and will try to
    use ports between CLIENT_PORT_START and CLIENT_PORT_START+MAX_NUM_PORTS-1
    """
    num_retries = 100
    retry_wait_time_sec = 0.05

    # Bind a socket to use as mutex lock: only one process at a time can hold
    # LOCK_PORT, so port assignment below is race-free across processes.
    socket_lock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    for i in range(num_retries):
      try:
        socket_lock.bind((ItsSession.IPADDR, ItsSession.LOCK_PORT))
        break
      except (socket.error, socket.timeout) as socket_issue:
        # Another process holds the lock; retry, give up after num_retries.
        if i == num_retries - 1:
          raise error_util.CameraItsError(
              self._device_id, 'socket lock returns error') from socket_issue
        else:
          time.sleep(retry_wait_time_sec)

    # Check if a port is already assigned to the device.
    command = 'adb forward --list'
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    # pylint: disable=unused-variable
    output, error = proc.communicate()
    port = None
    used_ports = []
    for line  in output.decode('utf-8').split(os.linesep):
      # each line should be formatted as:
      # "<device_id> tcp:<host_port> tcp:<remote_port>"
      forward_info = line.split()
      if len(forward_info) >= 3 and len(
          forward_info[1]) > 4 and forward_info[1][:4] == 'tcp:' and len(
              forward_info[2]) > 4 and forward_info[2][:4] == 'tcp:':
        local_p = int(forward_info[1][4:])
        remote_p = int(forward_info[2][4:])
        if forward_info[
            0] == self._device_id and remote_p == ItsSession.REMOTE_PORT:
          # Reuse the forward already set up for this device.
          port = local_p
          break
        else:
          used_ports.append(local_p)

    # Find the first available port if no port is assigned to the device.
    if port is None:
      for p in range(ItsSession.CLIENT_PORT_START,
                     ItsSession.CLIENT_PORT_START + ItsSession.MAX_NUM_PORTS):
        if self.check_port_availability(p, used_ports):
          port = p
          break

    if port is None:
      raise error_util.CameraItsError(self._device_id,
                                      ' cannot find an available ' + 'port')

    # Release the socket as mutex unlock
    socket_lock.close()

    # Connect to the socket
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.connect((self.IPADDR, port))
    self.sock.settimeout(self.SOCK_TIMEOUT)
226
227  def check_port_availability(self, check_port, used_ports):
228    """Check if the port is available or not.
229
230    Args:
231      check_port: Port to check for availability
232      used_ports: List of used ports
233
234    Returns:
235     True if the given port is available and can be assigned to the device.
236    """
237    if check_port not in used_ports:
238      # Try to run "adb forward" with the port
239      command = ('%s forward tcp:%d tcp:%d' %
240                 (self.adb, check_port, self.REMOTE_PORT))
241      proc = subprocess.Popen(
242          command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
243      error = proc.communicate()[1]
244
245      # Check if there is no error
246      if error is None or error.find('error'.encode()) < 0:
247        return True
248      else:
249        return False
250
  def __wait_for_service(self):
    """Wait for ItsService to be ready and reboot the device if needed.

    This also includes the optional reboot handling: if the user
    provides a "reboot" or "reboot=N" arg, then reboot the device,
    waiting for N seconds (default 30) before returning.
    """

    # Optional reboot requested on the command line as 'reboot' or 'reboot=N'.
    for s in sys.argv[1:]:
      if s[:6] == 'reboot':
        duration = 30
        if len(s) > 7 and s[6] == '=':
          duration = int(s[7:])
        logging.debug('Rebooting device')
        # NOTE: _run is a module-level helper (defined elsewhere in this file)
        # that executes a shell command.
        _run(f'{self.adb} reboot')
        _run(f'{self.adb} wait-for-device')
        time.sleep(duration)
        logging.debug('Reboot complete')

    # Flush logcat so following code won't be misled by previous
    # 'ItsService ready' log.
    _run(f'{self.adb} logcat -c')
    time.sleep(1)

    # Restart ItsService from a clean state.
    _run(f'{self.adb} shell am force-stop --user 0 {self.PACKAGE}')
    _run(f'{self.adb} shell am start-foreground-service --user 0 '
         f'-t text/plain -a {self.INTENT_START}')

    # Wait until the socket is ready to accept a connection.
    proc = subprocess.Popen(
        self.adb.split() + ['logcat'], stdout=subprocess.PIPE)
    logcat = proc.stdout
    while True:
      line = logcat.readline().strip()
      if line.find(b'ItsService ready') >= 0:
        break
    proc.kill()
    proc.communicate()
289
290  def __init__(self, device_id=None, camera_id=None, hidden_physical_id=None,
291               override_to_portrait=None):
292    self._camera_id = camera_id
293    self._device_id = device_id
294    self._hidden_physical_id = hidden_physical_id
295    self._override_to_portrait = override_to_portrait
296
297    # Initialize device id and adb command.
298    self.adb = 'adb -s ' + self._device_id
299    self.__wait_for_service()
300    self.__init_socket_port()
301
  def __enter__(self):
    """Context-manager entry: (re)open the camera and return the session."""
    # Close first in case a previous session left the camera open.
    self.__close_camera()
    self.__open_camera()
    return self
306
  def __exit__(self, exec_type, exec_value, exec_traceback):
    """Context-manager exit: close the camera and socket.

    Returns False so any in-flight exception propagates to the caller.
    """
    # Only clean up if __init__ got far enough to create the socket.
    if hasattr(self, 'sock') and self.sock:
      self.__close_camera()
      self.sock.close()
    return False
312
313  def override_with_hidden_physical_camera_props(self, props):
314    """Check that it is a valid sub-camera backing the logical camera.
315
316    If current session is for a hidden physical camera, check that it is a valid
317    sub-camera backing the logical camera, override self.props, and return the
318    characteristics of sub-camera. Otherwise, return "props" directly.
319
320    Args:
321     props: Camera properties object.
322
323    Returns:
324     The properties of the hidden physical camera if possible.
325    """
326    if self._hidden_physical_id:
327      if not camera_properties_utils.logical_multi_camera(props):
328        raise AssertionError(f'{self._camera_id} is not a logical multi-camera')
329      physical_ids = camera_properties_utils.logical_multi_camera_physical_ids(
330          props)
331      if self._hidden_physical_id not in physical_ids:
332        raise AssertionError(f'{self._hidden_physical_id} is not a hidden '
333                             f'sub-camera of {self._camera_id}')
334      props = self.get_camera_properties_by_id(self._hidden_physical_id)
335      self.props = props
336    return props
337
338  def get_camera_properties(self):
339    """Get the camera properties object for the device.
340
341    Returns:
342     The Python dictionary object for the CameraProperties object.
343    """
344    cmd = {}
345    cmd[_CMD_NAME_STR] = 'getCameraProperties'
346    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
347    data, _ = self.__read_response_from_socket()
348    if data[_TAG_STR] != 'cameraProperties':
349      raise error_util.CameraItsError('Invalid command response')
350    self.props = data[_OBJ_VALUE_STR]['cameraProperties']
351    return data[_OBJ_VALUE_STR]['cameraProperties']
352
353  def get_camera_properties_by_id(self, camera_id, override_to_portrait=None):
354    """Get the camera properties object for device with camera_id.
355
356    Args:
357     camera_id: The ID string of the camera
358     override_to_portrait: Optional value for overrideToPortrait
359
360    Returns:
361     The Python dictionary object for the CameraProperties object. Empty
362     if no such device exists.
363    """
364    cmd = {}
365    cmd[_CMD_NAME_STR] = 'getCameraPropertiesById'
366    cmd[_CAMERA_ID_STR] = camera_id
367    if override_to_portrait is not None:
368      cmd['overrideToPortrait'] = override_to_portrait
369    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
370    data, _ = self.__read_response_from_socket()
371    if data[_TAG_STR] != 'cameraProperties':
372      raise error_util.CameraItsError('Invalid command response')
373    return data[_OBJ_VALUE_STR]['cameraProperties']
374
  def __read_response_from_socket(self):
    """Reads a line (newline-terminated) string serialization of JSON object.

    Returns:
     Tuple of (deserialized json obj, optional binary buffer as a numpy
     uint8 array, or None when the response carries no buffer).
    """
    # Read one byte at a time until the newline terminator.
    chars = []
    while not chars or chars[-1] != '\n':
      ch = self.sock.recv(1).decode('utf-8')
      if not ch:
        # Socket was probably closed; otherwise don't get empty strings
        raise error_util.CameraItsError('Problem with socket on device side')
      chars.append(ch)
    line = ''.join(chars)
    jobj = json.loads(line)
    # Optionally read a binary buffer of a fixed size.
    buf = None
    if 'bufValueSize' in jobj:
      n = jobj['bufValueSize']
      buf = bytearray(n)
      # Fill the buffer with repeated recv_into calls; a memoryview slice
      # advances the write position without copying.
      view = memoryview(buf)
      while n > 0:
        nbytes = self.sock.recv_into(view, n)
        view = view[nbytes:]
        n -= nbytes
      buf = numpy.frombuffer(buf, dtype=numpy.uint8)
    return jobj, buf
402
  def __open_camera(self):
    """Get the camera ID to open if it is an argument as a single camera.

    This allows passing camera=# to individual tests at command line
    and camera=#,#,# or an no camera argv with tools/run_all_tests.py.
    In case the camera is a logical multi-camera, to run ITS on the
    hidden physical sub-camera, pass camera=[logical ID]:[physical ID]
    to an individual test at the command line, and same applies to multiple
    camera IDs for tools/run_all_tests.py: camera=#,#:#,#:#,#
    """
    # Fall back to parsing sys.argv when no camera_id was passed to __init__.
    if not self._camera_id:
      self._camera_id = 0
      for s in sys.argv[1:]:
        if s[:7] == 'camera=' and len(s) > 7:
          camera_ids = s[7:].split(',')
          # parse_camera_ids is a module-level helper defined elsewhere in
          # this file; it splits logical:physical combos.
          camera_id_combos = parse_camera_ids(camera_ids)
          # Only honor the argv camera when exactly one camera was given.
          if len(camera_id_combos) == 1:
            self._camera_id = camera_id_combos[0].id
            self._hidden_physical_id = camera_id_combos[0].sub_id

    logging.debug('Opening camera: %s', self._camera_id)
    cmd = {_CMD_NAME_STR: 'open', _CAMERA_ID_STR: self._camera_id}
    if self._override_to_portrait is not None:
      cmd['overrideToPortrait'] = self._override_to_portrait
    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
    data, _ = self.__read_response_from_socket()
    if data[_TAG_STR] != 'cameraOpened':
      raise error_util.CameraItsError('Invalid command response')
431
432  def __close_camera(self):
433    cmd = {_CMD_NAME_STR: 'close'}
434    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
435    data, _ = self.__read_response_from_socket()
436    if data[_TAG_STR] != 'cameraClosed':
437      raise error_util.CameraItsError('Invalid command response')
438
439  def zoom_ratio_within_range(self, zoom_ratio):
440    """Determine if a given zoom ratio is within device zoom range.
441
442    Args:
443      zoom_ratio: float; zoom ratio requested
444    Returns:
445      Boolean: True, if zoom_ratio inside device range. False otherwise.
446    """
447    zoom_range = self.props['android.control.zoomRatioRange']
448    return zoom_ratio >= zoom_range[0] and zoom_ratio <= zoom_range[1]
449
450  def get_sensors(self):
451    """Get all sensors on the device.
452
453    Returns:
454       A Python dictionary that returns keys and booleans for each sensor.
455    """
456    cmd = {}
457    cmd[_CMD_NAME_STR] = 'checkSensorExistence'
458    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
459    data, _ = self.__read_response_from_socket()
460    if data[_TAG_STR] != 'sensorExistence':
461      raise error_util.CameraItsError('Invalid response for command: %s' %
462                                      cmd[_CMD_NAME_STR])
463    return data[_OBJ_VALUE_STR]
464
465  def start_sensor_events(self):
466    """Start collecting sensor events on the device.
467
468    See get_sensor_events for more info.
469
470    Returns:
471       Nothing.
472    """
473    cmd = {}
474    cmd[_CMD_NAME_STR] = 'startSensorEvents'
475    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
476    data, _ = self.__read_response_from_socket()
477    if data[_TAG_STR] != 'sensorEventsStarted':
478      raise error_util.CameraItsError('Invalid response for command: %s' %
479                                      cmd[_CMD_NAME_STR])
480
481  def get_sensor_events(self):
482    """Get a trace of all sensor events on the device.
483
484        The trace starts when the start_sensor_events function is called. If
485        the test runs for a long time after this call, then the device's
486        internal memory can fill up. Calling get_sensor_events gets all events
487        from the device, and then stops the device from collecting events and
488        clears the internal buffer; to start again, the start_sensor_events
489        call must be used again.
490
491        Events from the accelerometer, compass, and gyro are returned; each
492        has a timestamp and x,y,z values.
493
494        Note that sensor events are only produced if the device isn't in its
495        standby mode (i.e.) if the screen is on.
496
497    Returns:
498            A Python dictionary with three keys ("accel", "mag", "gyro") each
499            of which maps to a list of objects containing "time","x","y","z"
500            keys.
501    """
502    cmd = {}
503    cmd[_CMD_NAME_STR] = 'getSensorEvents'
504    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
505    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
506    self.sock.settimeout(timeout)
507    data, _ = self.__read_response_from_socket()
508    if data[_TAG_STR] != 'sensorEvents':
509      raise error_util.CameraItsError('Invalid response for command: %s ' %
510                                      cmd[_CMD_NAME_STR])
511    self.sock.settimeout(self.SOCK_TIMEOUT)
512    return data[_OBJ_VALUE_STR]
513
514  def get_camera_ids(self):
515    """Returns the list of all camera_ids.
516
517    Returns:
518      List of camera ids on the device.
519    """
520    cmd = {'cmdName': 'getCameraIds'}
521    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
522    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
523    self.sock.settimeout(timeout)
524    data, _ = self.__read_response_from_socket()
525    if data['tag'] != 'cameraIds':
526      raise error_util.CameraItsError('Invalid command response')
527    return data['objValue']
528
529  def get_unavailable_physical_cameras(self, camera_id):
530    """Get the unavailable physical cameras ids.
531
532    Args:
533      camera_id: int; device id
534    Returns:
535      List of all physical camera ids which are unavailable.
536    """
537    cmd = {_CMD_NAME_STR: 'doGetUnavailablePhysicalCameras',
538           _CAMERA_ID_STR: camera_id}
539    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
540    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
541    self.sock.settimeout(timeout)
542    data, _ = self.__read_response_from_socket()
543    if data[_TAG_STR] != 'unavailablePhysicalCameras':
544      raise error_util.CameraItsError('Invalid command response')
545    return data[_OBJ_VALUE_STR]
546
547  def is_hlg10_recording_supported(self, profile_id):
548    """Query whether the camera device supports HLG10 video recording.
549
550    Args:
551      profile_id: int; profile id corresponding to the quality level.
552    Returns:
553      Boolean: True, if device supports HLG10 video recording, False in
554      all other cases.
555    """
556    cmd = {}
557    cmd[_CMD_NAME_STR] = 'isHLG10Supported'
558    cmd[_CAMERA_ID_STR] = self._camera_id
559    cmd['profileId'] = profile_id
560    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
561
562    data, _ = self.__read_response_from_socket()
563    if data[_TAG_STR] != 'hlg10Response':
564      raise error_util.CameraItsError('Failed to query HLG10 support')
565    return data[_STR_VALUE] == 'true'
566
567  def is_p3_capture_supported(self):
568    """Query whether the camera device supports P3 image capture.
569
570    Returns:
571      Boolean: True, if device supports P3 image capture, False in
572      all other cases.
573    """
574    cmd = {}
575    cmd[_CMD_NAME_STR] = 'isP3Supported'
576    cmd[_CAMERA_ID_STR] = self._camera_id
577    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
578
579    data, _ = self.__read_response_from_socket()
580    if data[_TAG_STR] != 'p3Response':
581      raise error_util.CameraItsError('Failed to query P3 support')
582    return data[_STR_VALUE] == 'true'
583
584  def is_landscape_to_portrait_enabled(self):
585    """Query whether the device has enabled the landscape to portrait property.
586
587    Returns:
588      Boolean: True, if the device has the system property enabled. False
589      otherwise.
590    """
591    cmd = {}
592    cmd[_CMD_NAME_STR] = 'isLandscapeToPortraitEnabled'
593    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
594
595    data, _ = self.__read_response_from_socket()
596    if data[_TAG_STR] != 'landscapeToPortraitEnabledResponse':
597      raise error_util.CameraItsError(
598          'Failed to query landscape to portrait system property')
599    return data[_STR_VALUE] == 'true'
600
  def do_basic_recording(self, profile_id, quality, duration,
                         video_stabilization_mode=0, hlg10_enabled=False,
                         zoom_ratio=None, ae_target_fps_min=None,
                         ae_target_fps_max=None):
    """Issue a recording request and read back the video recording object.

    The recording will be done with the format specified in quality. These
    quality levels correspond to the profiles listed in CamcorderProfile.
    The duration is the time in seconds for which the video will be recorded.
    The recorded object consists of a path on the device at which the
    recorded video is saved.

    Args:
      profile_id: int; profile id corresponding to the quality level.
      quality: Video recording quality such as High, Low, VGA.
      duration: The time in seconds for which the video will be recorded.
      video_stabilization_mode: Video stabilization mode ON/OFF. Value can be
      0: 'OFF', 1: 'ON', 2: 'PREVIEW'
      hlg10_enabled: boolean: True Enable 10-bit HLG video recording, False
      record using the regular SDR profile
      zoom_ratio: float; zoom ratio. None if default zoom
      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
    Returns:
      video_recorded_object: The recorded object returned from ItsService which
      contains path at which the recording is saved on the device, quality of
      the recorded video, video size of the recorded video, video frame rate
      and 'hlg10' if 'hlg10_enabled' is set to True.
      Ex:
      VideoRecordingObject: {
        'tag': 'recordingResponse',
        'objValue': {
          'recordedOutputPath':
            '/storage/emulated/0/Android/data/com.android.cts.verifier'
            '/files/VideoITS/VID_20220324_080414_0_CIF_352x288.mp4',
          'quality': 'CIF',
          'videoFrameRate': 30,
          'videoSize': '352x288'
        }
      }
    """
    cmd = {_CMD_NAME_STR: 'doBasicRecording', _CAMERA_ID_STR: self._camera_id,
           'profileId': profile_id, 'quality': quality,
           'recordingDuration': duration,
           'videoStabilizationMode': video_stabilization_mode,
           'hlg10Enabled': hlg10_enabled}
    # Validate zoom against the device range before sending it down.
    if zoom_ratio:
      if self.zoom_ratio_within_range(zoom_ratio):
        cmd['zoomRatio'] = zoom_ratio
      else:
        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
    # Both FPS bounds must be given for a target FPS range to be set.
    if ae_target_fps_min and ae_target_fps_max:
      cmd['aeTargetFpsMin'] = ae_target_fps_min
      cmd['aeTargetFpsMax'] = ae_target_fps_max
    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
    # Recording takes longer than a standard command; extend the timeout.
    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
    self.sock.settimeout(timeout)
    data, _ = self.__read_response_from_socket()
    if data[_TAG_STR] != 'recordingResponse':
      raise error_util.CameraItsError(
          f'Invalid response for command: {cmd[_CMD_NAME_STR]}')
    logging.debug('VideoRecordingObject: %s', data)
    return data[_OBJ_VALUE_STR]
664
665  def do_preview_recording(self, video_size, duration, stabilize,
666                           zoom_ratio=None, ae_target_fps_min=None,
667                           ae_target_fps_max=None):
668    """Issue a preview request and read back the preview recording object.
669
670    The resolution of the preview and its recording will be determined by
671    video_size. The duration is the time in seconds for which the preview will
672    be recorded. The recorded object consists of a path on the device at
673    which the recorded video is saved.
674
675    Args:
676      video_size: str; Preview resolution at which to record. ex. "1920x1080"
677      duration: int; The time in seconds for which the video will be recorded.
678      stabilize: boolean; Whether the preview should be stabilized or not
679      zoom_ratio: float; zoom ratio. None if default zoom
680      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
681      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
682    Returns:
683      video_recorded_object: The recorded object returned from ItsService which
684      contains path at which the recording is saved on the device, quality of
685      the recorded video which is always set to "preview", video size of the
686      recorded video, video frame rate.
687      Ex:
688      VideoRecordingObject: {
689        'tag': 'recordingResponse',
690        'objValue': {
691          'recordedOutputPath': '/storage/emulated/0/Android/data/'
692                                'com.android.cts.verifier/files/VideoITS/'
693                                'VID_20220324_080414_0_CIF_352x288.mp4',
694          'quality': 'preview',
695          'videoSize': '352x288'
696        }
697      }
698    """
699
700    cmd = {
701        _CMD_NAME_STR: 'doPreviewRecording',
702        _CAMERA_ID_STR: self._camera_id,
703        'videoSize': video_size,
704        'recordingDuration': duration,
705        'stabilize': stabilize
706    }
707    if zoom_ratio:
708      if self.zoom_ratio_within_range(zoom_ratio):
709        cmd['zoomRatio'] = zoom_ratio
710      else:
711        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
712    if ae_target_fps_min and ae_target_fps_max:
713      cmd['aeTargetFpsMin'] = ae_target_fps_min
714      cmd['aeTargetFpsMax'] = ae_target_fps_max
715    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
716    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
717    self.sock.settimeout(timeout)
718
719    data, _ = self.__read_response_from_socket()
720    logging.debug('VideoRecordingObject: %s', str(data))
721    if data[_TAG_STR] != 'recordingResponse':
722      raise error_util.CameraItsError(
723          f'Invalid response from command{cmd[_CMD_NAME_STR]}')
724    return data[_OBJ_VALUE_STR]
725
726  def get_supported_video_qualities(self, camera_id):
727    """Get all supported video qualities for this camera device.
728
729    ie. ['480:4', '1080:6', '2160:8', '720:5', 'CIF:3', 'HIGH:1', 'LOW:0',
730         'QCIF:2', 'QVGA:7']
731
732    Args:
733      camera_id: device id
734    Returns:
735      List of all supported video qualities and corresponding profileIds.
736    """
737    cmd = {}
738    cmd[_CMD_NAME_STR] = 'getSupportedVideoQualities'
739    cmd[_CAMERA_ID_STR] = camera_id
740    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
741    data, _ = self.__read_response_from_socket()
742    if data[_TAG_STR] != 'supportedVideoQualities':
743      raise error_util.CameraItsError('Invalid command response')
744    return data[_STR_VALUE].split(';')[:-1]  # remove the last appended ';'
745
746  def get_supported_preview_sizes(self, camera_id):
747    """Get all supported preview resolutions for this camera device.
748
749    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
750
751    Args:
752      camera_id: int; device id
753    Returns:
754      List of all supported video resolutions in ascending order.
755    """
756    cmd = {
757        _CMD_NAME_STR: 'getSupportedPreviewSizes',
758        _CAMERA_ID_STR: camera_id
759    }
760    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
761    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
762    self.sock.settimeout(timeout)
763    data, _ = self.__read_response_from_socket()
764    if data[_TAG_STR] != 'supportedPreviewSizes':
765      raise error_util.CameraItsError('Invalid command response')
766    if not data[_STR_VALUE]:
767      raise error_util.CameraItsError('No supported preview sizes')
768    return data[_STR_VALUE].split(';')
769
770  def get_supported_extensions(self, camera_id):
771    """Get all supported camera extensions for this camera device.
772
773    ie. [EXTENSION_AUTOMATIC, EXTENSION_BOKEH,
774         EXTENSION_FACE_RETOUCH, EXTENSION_HDR, EXTENSION_NIGHT]
775    where EXTENSION_AUTOMATIC is 0, EXTENSION_BOKEH is 1, etc.
776
777    Args:
778      camera_id: int; device ID
779    Returns:
780      List of all supported extensions (as int) in ascending order.
781    """
782    cmd = {
783        'cmdName': 'getSupportedExtensions',
784        'cameraId': camera_id
785    }
786    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
787    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
788    self.sock.settimeout(timeout)
789    data, _ = self.__read_response_from_socket()
790    if data['tag'] != 'supportedExtensions':
791      raise error_util.CameraItsError('Invalid command response')
792    if not data['strValue']:
793      raise error_util.CameraItsError('No supported extensions')
794    return [int(x) for x in str(data['strValue'][1:-1]).split(', ') if x]
795
796  def get_supported_extension_sizes(self, camera_id, extension, image_format):
797    """Get all supported camera sizes for this camera, extension, and format.
798
799    Sorts in ascending order according to area, i.e.
800    ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
801
802    Args:
803      camera_id: int; device ID
804      extension: int; the integer value of the extension.
805      image_format: int; the integer value of the format.
806    Returns:
807      List of sizes supported for this camera, extension, and format.
808    """
809    cmd = {
810        'cmdName': 'getSupportedExtensionSizes',
811        'cameraId': camera_id,
812        'extension': extension,
813        'format': image_format
814    }
815    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
816    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
817    self.sock.settimeout(timeout)
818    data, _ = self.__read_response_from_socket()
819    if data[_TAG_STR] != 'supportedExtensionSizes':
820      raise error_util.CameraItsError('Invalid command response')
821    if not data[_STR_VALUE]:
822      raise error_util.CameraItsError('No supported extensions')
823    return data[_STR_VALUE].split(';')
824
825  def get_display_size(self):
826    """Get the display size of the screen.
827
828    Returns:
829      The size of the display resolution in pixels.
830    """
831    cmd = {
832        'cmdName': 'getDisplaySize'
833    }
834    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
835    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
836    self.sock.settimeout(timeout)
837    data, _ = self.__read_response_from_socket()
838    if data['tag'] != 'displaySize':
839      raise error_util.CameraItsError('Invalid command response')
840    if not data['strValue']:
841      raise error_util.CameraItsError('No display size')
842    return data['strValue'].split('x')
843
844  def get_max_camcorder_profile_size(self, camera_id):
845    """Get the maximum camcorder profile size for this camera device.
846
847    Args:
848      camera_id: int; device id
849    Returns:
850      The maximum size among all camcorder profiles supported by this camera.
851    """
852    cmd = {
853        'cmdName': 'getMaxCamcorderProfileSize',
854        'cameraId': camera_id
855    }
856    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
857    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
858    self.sock.settimeout(timeout)
859    data, _ = self.__read_response_from_socket()
860    if data['tag'] != 'maxCamcorderProfileSize':
861      raise error_util.CameraItsError('Invalid command response')
862    if not data['strValue']:
863      raise error_util.CameraItsError('No max camcorder profile size')
864    return data['strValue'].split('x')
865
866  def do_simple_capture(self, cmd, out_surface):
867    """Issue single capture request via command and read back image/metadata.
868
869    Args:
870      cmd: Dictionary specifying command name, requests, and output surface.
871      out_surface: Dictionary describing output surface.
872    Returns:
873      An object which contains following fields:
874      * data: the image data as a numpy array of bytes.
875      * width: the width of the captured image.
876      * height: the height of the captured image.
877      * format: image format
878      * metadata: the capture result object
879    """
880    fmt = out_surface['format'] if 'format' in out_surface else 'yuv'
881    if fmt == 'jpg': fmt = 'jpeg'
882
883    # we only have 1 capture request and 1 surface by definition.
884    ncap = SINGLE_CAPTURE_NCAP
885
886    cam_id = None
887    bufs = {}
888    yuv_bufs = {}
889    if self._hidden_physical_id:
890      out_surface['physicalCamera'] = self._hidden_physical_id
891
892    if 'physicalCamera' in out_surface:
893      cam_id = out_surface['physicalCamera']
894    else:
895      cam_id = self._camera_id
896
897    bufs[cam_id] = {
898        'raw': [],
899        'raw10': [],
900        'raw12': [],
901        'rawStats': [],
902        'dng': [],
903        'jpeg': [],
904        'y8': []
905    }
906
907    # Only allow yuv output to multiple targets
908    yuv_surface = None
909    if cam_id == self._camera_id:
910      if 'physicalCamera' not in out_surface:
911        if out_surface['format'] == 'yuv':
912          yuv_surface = out_surface
913    else:
914      if ('physicalCamera' in out_surface and
915          out_surface['physicalCamera'] == cam_id):
916        if out_surface['format'] == 'yuv':
917          yuv_surface = out_surface
918
919    # Compute the buffer size of YUV targets
920    yuv_maxsize_1d = 0
921    if yuv_surface is not None:
922      if ('width' not in yuv_surface and 'height' not in yuv_surface):
923        if self.props is None:
924          raise error_util.CameraItsError('Camera props are unavailable')
925        yuv_maxsize_2d = capture_request_utils.get_available_output_sizes(
926            'yuv', self.props)[0]
927        # YUV420 size = 1.5 bytes per pixel
928        yuv_maxsize_1d = (yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3) // 2
929      if 'width' in yuv_surface and 'height' in yuv_surface:
930        yuv_size = (yuv_surface['width'] * yuv_surface['height'] * 3) // 2
931      else:
932        yuv_size = yuv_maxsize_1d
933
934      yuv_bufs[cam_id] = {yuv_size: []}
935
936    cam_ids = self._camera_id
937    self.sock.settimeout(self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT)
938    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
939
940    nbufs = 0
941    md = None
942    physical_md = None
943    width = None
944    height = None
945    capture_results_returned = False
946    while (nbufs < ncap) or (not capture_results_returned):
947      json_obj, buf = self.__read_response_from_socket()
948      if (json_obj[_TAG_STR] in ItsSession.IMAGE_FORMAT_LIST_1 and
949          buf is not None):
950        fmt = json_obj[_TAG_STR][:-5]
951        bufs[self._camera_id][fmt].append(buf)
952        nbufs += 1
953      elif json_obj[_TAG_STR] == 'yuvImage':
954        buf_size = numpy.product(buf.shape)
955        yuv_bufs[self._camera_id][buf_size].append(buf)
956        nbufs += 1
957      elif json_obj[_TAG_STR] == 'captureResults':
958        capture_results_returned = True
959        md = json_obj[_OBJ_VALUE_STR]['captureResult']
960        physical_md = json_obj[_OBJ_VALUE_STR]['physicalResults']
961        outputs = json_obj[_OBJ_VALUE_STR]['outputs']
962        returned_fmt = outputs[0]['format']
963        if fmt != returned_fmt:
964          raise AssertionError(
965              f'Incorrect format. Requested: {fmt}, '
966              f'Received: {returned_fmt}')
967        width = outputs[0]['width']
968        height = outputs[0]['height']
969        requested_width = out_surface['width']
970        requested_height = out_surface['height']
971        if requested_width != width or requested_height != height:
972          raise AssertionError(
973              'Incorrect size. '
974              f'Requested: {requested_width}x{requested_height}, '
975              f'Received: {width}x{height}')
976      else:
977        tag_string = unicodedata.normalize('NFKD', json_obj[_TAG_STR]).encode(
978            'ascii', 'ignore')
979        for x in ItsSession.IMAGE_FORMAT_LIST_2:
980          x = bytes(x, encoding='utf-8')
981          if tag_string.startswith(x):
982            if x == b'yuvImage':
983              physical_id = json_obj[_TAG_STR][len(x):]
984              if physical_id in cam_ids:
985                buf_size = numpy.product(buf.shape)
986                yuv_bufs[physical_id][buf_size].append(buf)
987                nbufs += 1
988            else:
989              physical_id = json_obj[_TAG_STR][len(x):]
990              if physical_id in cam_ids:
991                fmt = x[:-5].decode('UTF-8')
992                bufs[physical_id][fmt].append(buf)
993                nbufs += 1
994
995    if 'physicalCamera' in out_surface:
996      cam_id = out_surface['physicalCamera']
997    else:
998      cam_id = self._camera_id
999    ret = {'width': width, 'height': height, 'format': fmt}
1000    if cam_id == self._camera_id:
1001      ret['metadata'] = md
1002    else:
1003      if cam_id in physical_md:
1004        ret['metadata'] = physical_md[cam_id]
1005
1006    if fmt == 'yuv':
1007      buf_size = (width * height * 3) // 2
1008      ret['data'] = yuv_bufs[cam_id][buf_size][0]
1009    else:
1010      ret['data'] = bufs[cam_id][fmt][0]
1011
1012    return ret
1013
1014  def do_capture_with_flash(self,
1015                            preview_request_start,
1016                            preview_request_idle,
1017                            still_capture_req,
1018                            out_surface):
1019    """Issue capture request with flash and read back the image and metadata.
1020
1021    Captures a single image with still_capture_req as capture request
1022    with flash. It triggers the precapture sequence with preview request
1023    preview_request_start with capture intent preview by setting aePrecapture
1024    trigger to Start. This is followed by repeated preview requests
1025    preview_request_idle with aePrecaptureTrigger set to IDLE.
1026    Once the AE is converged, a single image is captured still_capture_req
1027    during which the flash must be fired.
1028    Note: The part where we read output data from socket is cloned from
1029    do_capture and will be consolidated in U.
1030
1031    Args:
1032      preview_request_start: Preview request with aePrecaptureTrigger set to
1033        Start
1034      preview_request_idle: Preview request with aePrecaptureTrigger set to Idle
1035      still_capture_req: Single still capture request.
1036      out_surface: Specifications of the output image formats and
1037        sizes to use for capture. Supports yuv and jpeg.
1038    Returns:
1039      An object which contains following fields:
1040      * data: the image data as a numpy array of bytes.
1041      * width: the width of the captured image.
1042      * height: the height of the captured image.
1043      * format: image format
1044      * metadata: the capture result object
1045    """
1046    cmd = {}
1047    cmd[_CMD_NAME_STR] = 'doCaptureWithFlash'
1048    cmd['previewRequestStart'] = [preview_request_start]
1049    cmd['previewRequestIdle'] = [preview_request_idle]
1050    cmd['stillCaptureRequest'] = [still_capture_req]
1051    cmd['outputSurfaces'] = [out_surface]
1052
1053    logging.debug('Capturing image with ON_AUTO_FLASH.')
1054    return self.do_simple_capture(cmd, out_surface)
1055
1056  def do_capture_with_extensions(self,
1057                                 cap_request,
1058                                 extension,
1059                                 out_surface):
1060    """Issue extension capture request(s), and read back image(s) and metadata.
1061
1062    Args:
1063      cap_request: The Python dict/list specifying the capture(s), which will be
1064        converted to JSON and sent to the device.
1065      extension: The extension to be requested.
1066      out_surface: specifications of the output image format and
1067        size to use for the capture.
1068
1069    Returns:
1070      An object, list of objects, or list of lists of objects, where each
1071      object contains the following fields:
1072      * data: the image data as a numpy array of bytes.
1073      * width: the width of the captured image.
1074      * height: the height of the captured image.
1075      * format: image the format, in [
1076                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
1077      * metadata: the capture result object (Python dictionary).
1078    """
1079    cmd = {}
1080    cmd[_CMD_NAME_STR] = 'doCaptureWithExtensions'
1081    cmd['repeatRequests'] = []
1082    cmd['captureRequests'] = [cap_request]
1083    cmd['extension'] = extension
1084    cmd['outputSurfaces'] = [out_surface]
1085
1086    logging.debug('Capturing image with EXTENSIONS.')
1087    return self.do_simple_capture(cmd, out_surface)
1088
1089  def do_capture(self,
1090                 cap_request,
1091                 out_surfaces=None,
1092                 reprocess_format=None,
1093                 repeat_request=None):
1094    """Issue capture request(s), and read back the image(s) and metadata.
1095
1096    The main top-level function for capturing one or more images using the
1097    device. Captures a single image if cap_request is a single object, and
1098    captures a burst if it is a list of objects.
1099
1100    The optional repeat_request field can be used to assign a repeating
1101    request list ran in background for 3 seconds to warm up the capturing
1102    pipeline before start capturing. The repeat_requests will be ran on a
1103    640x480 YUV surface without sending any data back. The caller needs to
1104    make sure the stream configuration defined by out_surfaces and
1105    repeat_request are valid or do_capture may fail because device does not
1106    support such stream configuration.
1107
1108    The out_surfaces field can specify the width(s), height(s), and
1109    format(s) of the captured image. The formats may be "yuv", "jpeg",
1110    "dng", "raw", "raw10", "raw12", "rawStats" or "y8". The default is a
1111    YUV420 frame ("yuv") corresponding to a full sensor frame.
1112
1113    1. Optionally the out_surfaces field can specify physical camera id(s) if
1114    the current camera device is a logical multi-camera. The physical camera
1115    id must refer to a physical camera backing this logical camera device.
1116    2. Optionally The output_surfaces field can also specify the use case(s) if
1117    the current camera device has STREAM_USE_CASE capability.
1118
1119    Note that one or more surfaces can be specified, allowing a capture to
1120    request images back in multiple formats (e.g.) raw+yuv, raw+jpeg,
1121    yuv+jpeg, raw+yuv+jpeg. If the size is omitted for a surface, the
1122    default is the largest resolution available for the format of that
1123    surface. At most one output surface can be specified for a given format,
1124    and raw+dng, raw10+dng, and raw+raw10 are not supported as combinations.
1125
1126    If reprocess_format is not None, for each request, an intermediate
1127    buffer of the given reprocess_format will be captured from camera and
1128    the intermediate buffer will be reprocessed to the output surfaces. The
1129    following settings will be turned off when capturing the intermediate
1130    buffer and will be applied when reprocessing the intermediate buffer.
1131    1. android.noiseReduction.mode
1132    2. android.edge.mode
1133    3. android.reprocess.effectiveExposureFactor
1134
1135    Supported reprocess format are "yuv" and "private". Supported output
1136    surface formats when reprocessing is enabled are "yuv" and "jpeg".
1137
1138    Example of a single capture request:
1139
1140    {
1141     "android.sensor.exposureTime": 100*1000*1000,
1142     "android.sensor.sensitivity": 100
1143    }
1144
1145    Example of a list of capture requests:
1146    [
1147     {
1148       "android.sensor.exposureTime": 100*1000*1000,
1149       "android.sensor.sensitivity": 100
1150     },
1151    {
1152      "android.sensor.exposureTime": 100*1000*1000,
1153       "android.sensor.sensitivity": 200
1154     }
1155    ]
1156
1157    Example of output surface specifications:
1158    {
1159     "width": 640,
1160     "height": 480,
1161     "format": "yuv"
1162    }
1163    [
1164     {
1165       "format": "jpeg"
1166     },
1167     {
1168       "format": "raw"
1169     }
1170    ]
1171
1172    The following variables defined in this class are shortcuts for
1173    specifying one or more formats where each output is the full size for
1174    that format; they can be used as values for the out_surfaces arguments:
1175
1176    CAP_RAW
1177    CAP_DNG
1178    CAP_YUV
1179    CAP_JPEG
1180    CAP_RAW_YUV
1181    CAP_DNG_YUV
1182    CAP_RAW_JPEG
1183    CAP_DNG_JPEG
1184    CAP_YUV_JPEG
1185    CAP_RAW_YUV_JPEG
1186    CAP_DNG_YUV_JPEG
1187
1188    If multiple formats are specified, then this function returns multiple
1189    capture objects, one for each requested format. If multiple formats and
1190    multiple captures (i.e. a burst) are specified, then this function
1191    returns multiple lists of capture objects. In both cases, the order of
1192    the returned objects matches the order of the requested formats in the
1193    out_surfaces parameter. For example:
1194
1195    yuv_cap = do_capture(req1)
1196    yuv_cap = do_capture(req1,yuv_fmt)
1197    yuv_cap, raw_cap = do_capture(req1, [yuv_fmt,raw_fmt])
1198    yuv_caps = do_capture([req1,req2], yuv_fmt)
1199    yuv_caps, raw_caps = do_capture([req1,req2], [yuv_fmt,raw_fmt])
1200
1201    The "rawStats" format processes the raw image and returns a new image
1202    of statistics from the raw image. The format takes additional keys,
1203    "gridWidth" and "gridHeight" which are size of grid cells in a 2D grid
1204    of the raw image. For each grid cell, the mean and variance of each raw
1205    channel is computed, and the do_capture call returns two 4-element float
1206    images of dimensions (rawWidth / gridWidth, rawHeight / gridHeight),
1207    concatenated back-to-back, where the first image contains the 4-channel
1208    means and the second contains the 4-channel variances. Note that only
1209    pixels in the active array crop region are used; pixels outside this
1210    region (for example optical black rows) are cropped out before the
1211    gridding and statistics computation is performed.
1212
1213    For the rawStats format, if the gridWidth is not provided then the raw
1214    image width is used as the default, and similarly for gridHeight. With
1215    this, the following is an example of a output description that computes
1216    the mean and variance across each image row:
1217    {
1218      "gridHeight": 1,
1219      "format": "rawStats"
1220    }
1221
1222    Args:
1223      cap_request: The Python dict/list specifying the capture(s), which will be
1224        converted to JSON and sent to the device.
1225      out_surfaces: (Optional) specifications of the output image formats and
1226        sizes to use for each capture.
1227      reprocess_format: (Optional) The reprocessing format. If not
1228        None,reprocessing will be enabled.
1229      repeat_request: Repeating request list.
1230
1231    Returns:
1232      An object, list of objects, or list of lists of objects, where each
1233      object contains the following fields:
1234      * data: the image data as a numpy array of bytes.
1235      * width: the width of the captured image.
1236      * height: the height of the captured image.
1237      * format: image the format, in [
1238                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
1239      * metadata: the capture result object (Python dictionary).
1240    """
1241    cmd = {}
1242    if reprocess_format is not None:
1243      if repeat_request is not None:
1244        raise error_util.CameraItsError(
1245            'repeating request + reprocessing is not supported')
1246      cmd[_CMD_NAME_STR] = 'doReprocessCapture'
1247      cmd['reprocessFormat'] = reprocess_format
1248    else:
1249      cmd[_CMD_NAME_STR] = 'doCapture'
1250
1251    if repeat_request is None:
1252      cmd['repeatRequests'] = []
1253    elif not isinstance(repeat_request, list):
1254      cmd['repeatRequests'] = [repeat_request]
1255    else:
1256      cmd['repeatRequests'] = repeat_request
1257
1258    if not isinstance(cap_request, list):
1259      cmd['captureRequests'] = [cap_request]
1260    else:
1261      cmd['captureRequests'] = cap_request
1262
1263    if out_surfaces is not None:
1264      if not isinstance(out_surfaces, list):
1265        cmd['outputSurfaces'] = [out_surfaces]
1266      else:
1267        cmd['outputSurfaces'] = out_surfaces
1268      formats = [
1269          c['format'] if 'format' in c else 'yuv' for c in cmd['outputSurfaces']
1270      ]
1271      formats = [s if s != 'jpg' else 'jpeg' for s in formats]
1272    else:
1273      max_yuv_size = capture_request_utils.get_available_output_sizes(
1274          'yuv', self.props)[0]
1275      formats = ['yuv']
1276      cmd['outputSurfaces'] = [{
1277          'format': 'yuv',
1278          'width': max_yuv_size[0],
1279          'height': max_yuv_size[1]
1280      }]
1281
1282    ncap = len(cmd['captureRequests'])
1283    nsurf = 1 if out_surfaces is None else len(cmd['outputSurfaces'])
1284
1285    cam_ids = []
1286    bufs = {}
1287    yuv_bufs = {}
1288    for i, s in enumerate(cmd['outputSurfaces']):
1289      if self._hidden_physical_id:
1290        s['physicalCamera'] = self._hidden_physical_id
1291
1292      if 'physicalCamera' in s:
1293        cam_id = s['physicalCamera']
1294      else:
1295        cam_id = self._camera_id
1296
1297      if cam_id not in cam_ids:
1298        cam_ids.append(cam_id)
1299        bufs[cam_id] = {
1300            'raw': [],
1301            'raw10': [],
1302            'raw12': [],
1303            'rawStats': [],
1304            'dng': [],
1305            'jpeg': [],
1306            'jpeg_r': [],
1307            'y8': []
1308        }
1309
1310    for cam_id in cam_ids:
1311       # Only allow yuv output to multiple targets
1312      if cam_id == self._camera_id:
1313        yuv_surfaces = [
1314            s for s in cmd['outputSurfaces']
1315            if s['format'] == 'yuv' and 'physicalCamera' not in s
1316        ]
1317        formats_for_id = [
1318            s['format']
1319            for s in cmd['outputSurfaces']
1320            if 'physicalCamera' not in s
1321        ]
1322      else:
1323        yuv_surfaces = [
1324            s for s in cmd['outputSurfaces'] if s['format'] == 'yuv' and
1325            'physicalCamera' in s and s['physicalCamera'] == cam_id
1326        ]
1327        formats_for_id = [
1328            s['format']
1329            for s in cmd['outputSurfaces']
1330            if 'physicalCamera' in s and s['physicalCamera'] == cam_id
1331        ]
1332
1333      n_yuv = len(yuv_surfaces)
1334      # Compute the buffer size of YUV targets
1335      yuv_maxsize_1d = 0
1336      for s in yuv_surfaces:
1337        if ('width' not in s and 'height' not in s):
1338          if self.props is None:
1339            raise error_util.CameraItsError('Camera props are unavailable')
1340          yuv_maxsize_2d = capture_request_utils.get_available_output_sizes(
1341              'yuv', self.props)[0]
1342          # YUV420 size = 1.5 bytes per pixel
1343          yuv_maxsize_1d = (yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3) // 2
1344          break
1345      yuv_sizes = [
1346          (c['width'] * c['height'] * 3) // 2
1347          if 'width' in c and 'height' in c else yuv_maxsize_1d
1348          for c in yuv_surfaces
1349      ]
1350      # Currently we don't pass enough metadata from ItsService to distinguish
1351      # different yuv stream of same buffer size
1352      if len(yuv_sizes) != len(set(yuv_sizes)):
1353        raise error_util.CameraItsError(
1354            'ITS does not support yuv outputs of same buffer size')
1355      if len(formats_for_id) > len(set(formats_for_id)):
1356        if n_yuv != len(formats_for_id) - len(set(formats_for_id)) + 1:
1357          raise error_util.CameraItsError('Duplicate format requested')
1358
1359      yuv_bufs[cam_id] = {size: [] for size in yuv_sizes}
1360
1361    raw_formats = 0
1362    raw_formats += 1 if 'dng' in formats else 0
1363    raw_formats += 1 if 'raw' in formats else 0
1364    raw_formats += 1 if 'raw10' in formats else 0
1365    raw_formats += 1 if 'raw12' in formats else 0
1366    raw_formats += 1 if 'rawStats' in formats else 0
1367    if raw_formats > 1:
1368      raise error_util.CameraItsError('Different raw formats not supported')
1369
1370    # Detect long exposure time and set timeout accordingly
1371    longest_exp_time = 0
1372    for req in cmd['captureRequests']:
1373      if 'android.sensor.exposureTime' in req and req[
1374          'android.sensor.exposureTime'] > longest_exp_time:
1375        longest_exp_time = req['android.sensor.exposureTime']
1376
1377    extended_timeout = longest_exp_time // self.SEC_TO_NSEC + self.SOCK_TIMEOUT
1378    if repeat_request:
1379      extended_timeout += self.EXTRA_SOCK_TIMEOUT
1380    self.sock.settimeout(extended_timeout)
1381
1382    logging.debug('Capturing %d frame%s with %d format%s [%s]', ncap,
1383                  's' if ncap > 1 else '', nsurf, 's' if nsurf > 1 else '',
1384                  ','.join(formats))
1385    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1386
1387    # Wait for ncap*nsurf images and ncap metadata responses.
1388    # Assume that captures come out in the same order as requested in
1389    # the burst, however individual images of different formats can come
1390    # out in any order for that capture.
1391    nbufs = 0
1392    mds = []
1393    physical_mds = []
1394    widths = None
1395    heights = None
1396    while nbufs < ncap * nsurf or len(mds) < ncap:
1397      json_obj, buf = self.__read_response_from_socket()
1398      if (json_obj[_TAG_STR] in ItsSession.IMAGE_FORMAT_LIST_1 and
1399          buf is not None):
1400        fmt = json_obj[_TAG_STR][:-5]
1401        bufs[self._camera_id][fmt].append(buf)
1402        nbufs += 1
1403      # Physical camera is appended to the tag string of a private capture
1404      elif json_obj[_TAG_STR].startswith('privImage'):
1405        # The private image format buffers are opaque to camera clients
1406        # and cannot be accessed.
1407        nbufs += 1
1408      elif json_obj[_TAG_STR] == 'yuvImage':
1409        buf_size = numpy.product(buf.shape)
1410        yuv_bufs[self._camera_id][buf_size].append(buf)
1411        nbufs += 1
1412      elif json_obj[_TAG_STR] == 'captureResults':
1413        mds.append(json_obj[_OBJ_VALUE_STR]['captureResult'])
1414        physical_mds.append(json_obj[_OBJ_VALUE_STR]['physicalResults'])
1415        outputs = json_obj[_OBJ_VALUE_STR]['outputs']
1416        widths = [out['width'] for out in outputs]
1417        heights = [out['height'] for out in outputs]
1418      else:
1419        tag_string = unicodedata.normalize('NFKD', json_obj[_TAG_STR]).encode(
1420            'ascii', 'ignore')
1421        for x in ItsSession.IMAGE_FORMAT_LIST_2:
1422          x = bytes(x, encoding='utf-8')
1423          if tag_string.startswith(x):
1424            if x == b'yuvImage':
1425              physical_id = json_obj[_TAG_STR][len(x):]
1426              if physical_id in cam_ids:
1427                buf_size = numpy.product(buf.shape)
1428                yuv_bufs[physical_id][buf_size].append(buf)
1429                nbufs += 1
1430            else:
1431              physical_id = json_obj[_TAG_STR][len(x):]
1432              if physical_id in cam_ids:
1433                fmt = x[:-5].decode('UTF-8')
1434                bufs[physical_id][fmt].append(buf)
1435                nbufs += 1
1436    rets = []
1437    for j, fmt in enumerate(formats):
1438      objs = []
1439      if 'physicalCamera' in cmd['outputSurfaces'][j]:
1440        cam_id = cmd['outputSurfaces'][j]['physicalCamera']
1441      else:
1442        cam_id = self._camera_id
1443
1444      for i in range(ncap):
1445        obj = {}
1446        obj['width'] = widths[j]
1447        obj['height'] = heights[j]
1448        obj['format'] = fmt
1449        if cam_id == self._camera_id:
1450          obj['metadata'] = mds[i]
1451        else:
1452          for physical_md in physical_mds[i]:
1453            if cam_id in physical_md:
1454              obj['metadata'] = physical_md[cam_id]
1455              break
1456
1457        if fmt == 'yuv':
1458          buf_size = (widths[j] * heights[j] * 3) // 2
1459          obj['data'] = yuv_bufs[cam_id][buf_size][i]
1460        elif fmt != 'priv':
1461          obj['data'] = bufs[cam_id][fmt][i]
1462        objs.append(obj)
1463      rets.append(objs if ncap > 1 else objs[0])
1464    self.sock.settimeout(self.SOCK_TIMEOUT)
1465    if len(rets) > 1 or (isinstance(rets[0], dict) and
1466                         isinstance(cap_request, list)):
1467      return rets
1468    else:
1469      return rets[0]
1470
1471  def do_vibrate(self, pattern):
1472    """Cause the device to vibrate to a specific pattern.
1473
1474    Args:
1475      pattern: Durations (ms) for which to turn on or off the vibrator.
1476      The first value indicates the number of milliseconds to wait
1477      before turning the vibrator on. The next value indicates the
1478      number of milliseconds for which to keep the vibrator on
1479      before turning it off. Subsequent values alternate between
1480      durations in milliseconds to turn the vibrator off or to turn
1481      the vibrator on.
1482
1483    Returns:
1484      Nothing.
1485    """
1486    cmd = {}
1487    cmd[_CMD_NAME_STR] = 'doVibrate'
1488    cmd['pattern'] = pattern
1489    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1490    data, _ = self.__read_response_from_socket()
1491    if data[_TAG_STR] != 'vibrationStarted':
1492      raise error_util.CameraItsError('Invalid response for command: %s' %
1493                                      cmd[_CMD_NAME_STR])
1494
1495  def set_audio_restriction(self, mode):
1496    """Set the audio restriction mode for this camera device.
1497
1498    Args:
1499     mode: int; the audio restriction mode. See CameraDevice.java for valid
1500     value.
1501    Returns:
1502     Nothing.
1503    """
1504    cmd = {}
1505    cmd[_CMD_NAME_STR] = 'setAudioRestriction'
1506    cmd['mode'] = mode
1507    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1508    data, _ = self.__read_response_from_socket()
1509    if data[_TAG_STR] != 'audioRestrictionSet':
1510      raise error_util.CameraItsError('Invalid response for command: %s' %
1511                                      cmd[_CMD_NAME_STR])
1512
  # pylint: disable=dangerous-default-value
  def do_3a(self,
            regions_ae=[[0, 0, 1, 1, 1]],
            regions_awb=[[0, 0, 1, 1, 1]],
            regions_af=[[0, 0, 1, 1, 1]],
            do_ae=True,
            do_awb=True,
            do_af=True,
            lock_ae=False,
            lock_awb=False,
            get_results=False,
            ev_comp=0,
            auto_flash=False,
            mono_camera=False,
            zoom_ratio=None):
    """Perform a 3A operation on the device.

    Triggers some or all of AE, AWB, and AF, and returns once they have
    converged. Uses the vendor 3A that is implemented inside the HAL.
    Note: do_awb is always enabled regardless of do_awb flag

    Throws an assertion if 3A fails to converge.

    Args:
      regions_ae: List of weighted AE regions.
      regions_awb: List of weighted AWB regions.
      regions_af: List of weighted AF regions.
      do_ae: Trigger AE and wait for it to converge.
      do_awb: Wait for AWB to converge.
      do_af: Trigger AF and wait for it to converge.
      lock_ae: Request AE lock after convergence, and wait for it.
      lock_awb: Request AWB lock after convergence, and wait for it.
      get_results: Return the 3A results from this function.
      ev_comp: An EV compensation value to use when running AE.
      auto_flash: AE control boolean to enable auto flash.
      mono_camera: Boolean for monochrome camera.
      zoom_ratio: Zoom ratio. None if default zoom

      Region format in args:
         Arguments are lists of weighted regions; each weighted region is a
         list of 5 values, [x, y, w, h, wgt], and each argument is a list of
         these 5-value lists. The coordinates are given as normalized
         rectangles (x, y, w, h) specifying the region. For example:
         [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
         Weights are non-negative integers.

    Returns:
      Five values are returned if get_results is true:
      * AE sensitivity; None if do_ae is False
      * AE exposure time; None if do_ae is False
      * AWB gains (list);
      * AWB transform (list);
      * AF focus position; None if do_af is false
      Otherwise, it returns five None values.
    """
    logging.debug('Running vendor 3A on device')
    cmd = {}
    cmd[_CMD_NAME_STR] = 'do3A'
    # Flatten each list of 5-value region lists into one flat list per 3A
    # routine, as the service-side command expects.
    cmd['regions'] = {
        'ae': sum(regions_ae, []),
        'awb': sum(regions_awb, []),
        'af': sum(regions_af, [])
    }
    cmd['triggers'] = {'ae': do_ae, 'af': do_af}
    if lock_ae:
      cmd['aeLock'] = True
    if lock_awb:
      cmd['awbLock'] = True
    if ev_comp != 0:
      cmd['evComp'] = ev_comp
    if auto_flash:
      cmd['autoFlash'] = True
    if self._hidden_physical_id:
      cmd['physicalId'] = self._hidden_physical_id
    if zoom_ratio:
      if self.zoom_ratio_within_range(zoom_ratio):
        cmd['zoomRatio'] = zoom_ratio
      else:
        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())

    # Wait for each specified 3A to converge.
    ae_sens = None
    ae_exp = None
    awb_gains = None
    awb_transform = None
    af_dist = None
    converged = False
    # Consume intermediate result messages until the service sends '3aDone'.
    # Results for routines that were not requested are ignored.
    while True:
      data, _ = self.__read_response_from_socket()
      vals = data[_STR_VALUE].split()
      if data[_TAG_STR] == 'aeResult':
        if do_ae:
          ae_sens, ae_exp = [int(i) for i in vals]
      elif data[_TAG_STR] == 'afResult':
        if do_af:
          af_dist = float(vals[0])
      elif data[_TAG_STR] == 'awbResult':
        # First 4 whitespace-separated values are the AWB gains; the
        # remaining values form the AWB transform.
        awb_gains = [float(f) for f in vals[:4]]
        awb_transform = [float(f) for f in vals[4:]]
      elif data[_TAG_STR] == '3aConverged':
        converged = True
      elif data[_TAG_STR] == '3aDone':
        break
      else:
        raise error_util.CameraItsError('Invalid command response')
    if converged and not get_results:
      return None, None, None, None, None
    # The AWB-gains check is skipped for mono cameras.
    if (do_ae and ae_sens is None or
        (not mono_camera and do_awb and awb_gains is None) or
        do_af and af_dist is None or not converged):
      raise error_util.CameraItsError('3A failed to converge')
    return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
1626
1627  def do_autoframing(self, zoom_ratio=None):
1628    """Perform autoframing on the device.
1629
1630    Args:
1631      zoom_ratio: Zoom ratio. None if default zoom.
1632    """
1633    cmd = {}
1634    cmd[_CMD_NAME_STR] = 'doAutoframing'
1635    if zoom_ratio:
1636      if self.zoom_ratio_within_range(zoom_ratio):
1637        cmd['zoomRatio'] = zoom_ratio
1638      else:
1639        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
1640    converged = False
1641    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1642
1643    while True:
1644      data, _ = self.__read_response_from_socket()
1645      if data[_TAG_STR] == 'autoframingConverged':
1646        converged = True
1647      elif data[_TAG_STR] == 'autoframingDone':
1648        break
1649      else:
1650        raise error_util.CameraItsError('Invalid command response')
1651
1652    if not converged:
1653      raise error_util.CameraItsError('Autoframing failed to converge')
1654
1655  def calc_camera_fov(self, props):
1656    """Determine the camera field of view from internal params.
1657
1658    Args:
1659      props: Camera properties object.
1660
1661    Returns:
1662      camera_fov: string; field of view for camera.
1663    """
1664
1665    focal_ls = props['android.lens.info.availableFocalLengths']
1666    if len(focal_ls) > 1:
1667      logging.debug('Doing capture to determine logical camera focal length')
1668      cap = self.do_capture(capture_request_utils.auto_capture_request())
1669      focal_l = cap['metadata']['android.lens.focalLength']
1670    else:
1671      focal_l = focal_ls[0]
1672
1673    sensor_size = props['android.sensor.info.physicalSize']
1674    diag = math.sqrt(sensor_size['height']**2 + sensor_size['width']**2)
1675    try:
1676      fov = str(round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2))
1677    except ValueError:
1678      fov = str(0)
1679    logging.debug('Calculated FoV: %s', fov)
1680    return fov
1681
1682  def get_file_name_to_load(self, chart_distance, camera_fov, scene):
1683    """Get the image to load on the tablet depending on fov and chart_distance.
1684
1685    Args:
1686     chart_distance: float; distance in cm from camera of displayed chart
1687     camera_fov: float; camera field of view.
1688     scene: String; Scene to be used in the test.
1689
1690    Returns:
1691     file_name: file name to display on the tablet.
1692
1693    """
1694    chart_scaling = opencv_processing_utils.calc_chart_scaling(
1695        chart_distance, camera_fov)
1696    if math.isclose(
1697        chart_scaling,
1698        opencv_processing_utils.SCALE_RFOV_IN_WFOV_BOX,
1699        abs_tol=SCALING_TO_FILE_ATOL):
1700      file_name = f'{scene}_{opencv_processing_utils.SCALE_RFOV_IN_WFOV_BOX}x_scaled.png'
1701    elif math.isclose(
1702        chart_scaling,
1703        opencv_processing_utils.SCALE_TELE_IN_WFOV_BOX,
1704        abs_tol=SCALING_TO_FILE_ATOL):
1705      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE_IN_WFOV_BOX}x_scaled.png'
1706    elif math.isclose(
1707        chart_scaling,
1708        opencv_processing_utils.SCALE_TELE25_IN_RFOV_BOX,
1709        abs_tol=SCALING_TO_FILE_ATOL):
1710      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE25_IN_RFOV_BOX}x_scaled.png'
1711    elif math.isclose(
1712        chart_scaling,
1713        opencv_processing_utils.SCALE_TELE40_IN_RFOV_BOX,
1714        abs_tol=SCALING_TO_FILE_ATOL):
1715      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE40_IN_RFOV_BOX}x_scaled.png'
1716    elif math.isclose(
1717        chart_scaling,
1718        opencv_processing_utils.SCALE_TELE_IN_RFOV_BOX,
1719        abs_tol=SCALING_TO_FILE_ATOL):
1720      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE_IN_RFOV_BOX}x_scaled.png'
1721    else:
1722      file_name = f'{scene}.png'
1723    logging.debug('Scene to load: %s', file_name)
1724    return file_name
1725
1726  def is_stream_combination_supported(self, out_surfaces):
1727    """Query whether out_surfaces combination is supported by the camera device.
1728
1729    This function hooks up to the isSessionConfigurationSupported() camera API
1730    to query whether a particular stream combination is supported.
1731
1732    Args:
1733      out_surfaces: dict; see do_capture() for specifications on out_surfaces
1734
1735    Returns:
1736      Boolean
1737    """
1738    cmd = {}
1739    cmd[_CMD_NAME_STR] = 'isStreamCombinationSupported'
1740
1741    if not isinstance(out_surfaces, list):
1742      cmd['outputSurfaces'] = [out_surfaces]
1743    else:
1744      cmd['outputSurfaces'] = out_surfaces
1745    formats = [c['format'] if 'format' in c else 'yuv'
1746               for c in cmd['outputSurfaces']]
1747    formats = [s if s != 'jpg' else 'jpeg' for s in formats]
1748
1749    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1750
1751    data, _ = self.__read_response_from_socket()
1752    if data[_TAG_STR] != 'streamCombinationSupport':
1753      raise error_util.CameraItsError('Failed to query stream combination')
1754
1755    return data[_STR_VALUE] == 'supportedCombination'
1756
1757  def is_camera_privacy_mode_supported(self):
1758    """Query whether the mobile device supports camera privacy mode.
1759
1760    This function checks whether the mobile device has FEATURE_CAMERA_TOGGLE
1761    feature support, which indicates the camera device can run in privacy mode.
1762
1763    Returns:
1764      Boolean
1765    """
1766    cmd = {}
1767    cmd[_CMD_NAME_STR] = 'isCameraPrivacyModeSupported'
1768    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1769
1770    data, _ = self.__read_response_from_socket()
1771    if data[_TAG_STR] != 'cameraPrivacyModeSupport':
1772      raise error_util.CameraItsError('Failed to query camera privacy mode'
1773                                      ' support')
1774    return data[_STR_VALUE] == 'true'
1775
1776  def is_primary_camera(self):
1777    """Query whether the camera device is a primary rear/front camera.
1778
1779    A primary rear/front facing camera is a camera device with the lowest
1780    camera Id for that facing.
1781
1782    Returns:
1783      Boolean
1784    """
1785    cmd = {}
1786    cmd[_CMD_NAME_STR] = 'isPrimaryCamera'
1787    cmd[_CAMERA_ID_STR] = self._camera_id
1788    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1789
1790    data, _ = self.__read_response_from_socket()
1791    if data[_TAG_STR] != 'primaryCamera':
1792      raise error_util.CameraItsError('Failed to query primary camera')
1793    return data[_STR_VALUE] == 'true'
1794
1795  def is_performance_class(self):
1796    """Query whether the mobile device is an R or S performance class device.
1797
1798    Returns:
1799      Boolean
1800    """
1801    cmd = {}
1802    cmd[_CMD_NAME_STR] = 'isPerformanceClass'
1803    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1804
1805    data, _ = self.__read_response_from_socket()
1806    if data[_TAG_STR] != 'performanceClass':
1807      raise error_util.CameraItsError('Failed to query performance class')
1808    return data[_STR_VALUE] == 'true'
1809
1810  def measure_camera_launch_ms(self):
1811    """Measure camera launch latency in millisecond, from open to first frame.
1812
1813    Returns:
1814      Camera launch latency from camera open to receipt of first frame
1815    """
1816    cmd = {}
1817    cmd[_CMD_NAME_STR] = 'measureCameraLaunchMs'
1818    cmd[_CAMERA_ID_STR] = self._camera_id
1819    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1820
1821    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
1822    self.sock.settimeout(timeout)
1823    data, _ = self.__read_response_from_socket()
1824    self.sock.settimeout(self.SOCK_TIMEOUT)
1825
1826    if data[_TAG_STR] != 'cameraLaunchMs':
1827      raise error_util.CameraItsError('Failed to measure camera launch latency')
1828    return float(data[_STR_VALUE])
1829
1830  def measure_camera_1080p_jpeg_capture_ms(self):
1831    """Measure camera 1080P jpeg capture latency in milliseconds.
1832
1833    Returns:
1834      Camera jpeg capture latency in milliseconds
1835    """
1836    cmd = {}
1837    cmd[_CMD_NAME_STR] = 'measureCamera1080pJpegCaptureMs'
1838    cmd[_CAMERA_ID_STR] = self._camera_id
1839    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1840
1841    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
1842    self.sock.settimeout(timeout)
1843    data, _ = self.__read_response_from_socket()
1844    self.sock.settimeout(self.SOCK_TIMEOUT)
1845
1846    if data[_TAG_STR] != 'camera1080pJpegCaptureMs':
1847      raise error_util.CameraItsError(
1848          'Failed to measure camera 1080p jpeg capture latency')
1849    return float(data[_STR_VALUE])
1850
1851  def _camera_id_to_props(self):
1852    """Return the properties of each camera ID."""
1853    unparsed_ids = self.get_camera_ids().get('cameraIdArray', [])
1854    parsed_ids = parse_camera_ids(unparsed_ids)
1855    id_to_props = {}
1856    for unparsed_id, id_combo in zip(unparsed_ids, parsed_ids):
1857      if id_combo.sub_id is None:
1858        props = self.get_camera_properties_by_id(id_combo.id)
1859      else:
1860        props = self.get_camera_properties_by_id(id_combo.sub_id)
1861      id_to_props[unparsed_id] = props
1862    if not id_to_props:
1863      raise AssertionError('No camera IDs were found.')
1864    return id_to_props
1865
1866  def has_ultrawide_camera(self, facing):
1867    """Return if device has an ultrawide camera facing the same direction.
1868
1869    Args:
1870      facing: constant describing the direction the camera device lens faces.
1871
1872    Returns:
1873      True if the device has an ultrawide camera facing in that direction.
1874    """
1875    camera_ids = self.get_camera_ids()
1876    primary_rear_camera_id = camera_ids.get('primaryRearCameraId', '')
1877    primary_front_camera_id = camera_ids.get('primaryFrontCameraId', '')
1878    if facing == camera_properties_utils.LENS_FACING_BACK:
1879      primary_camera_id = primary_rear_camera_id
1880    elif facing == camera_properties_utils.LENS_FACING_FRONT:
1881      primary_camera_id = primary_front_camera_id
1882    else:
1883      raise NotImplementedError('Cameras not facing either front or back '
1884                                'are currently unsupported.')
1885    id_to_props = self._camera_id_to_props()
1886    fov_and_facing = collections.namedtuple('FovAndFacing', ['fov', 'facing'])
1887    id_to_fov_facing = {
1888        unparsed_id: fov_and_facing(
1889            self.calc_camera_fov(props), props['android.lens.facing']
1890        )
1891        for unparsed_id, props in id_to_props.items()
1892    }
1893    logging.debug('IDs to (FOVs, facing): %s', id_to_fov_facing)
1894    primary_camera_fov, primary_camera_facing = id_to_fov_facing[
1895        primary_camera_id]
1896    for unparsed_id, fov_facing_combo in id_to_fov_facing.items():
1897      if (float(fov_facing_combo.fov) > float(primary_camera_fov) and
1898          fov_facing_combo.facing == primary_camera_facing and
1899          unparsed_id != primary_camera_id):
1900        logging.debug('Ultrawide camera found with ID %s and FoV %.3f. '
1901                      'Primary camera has ID %s and FoV: %.3f.',
1902                      unparsed_id, float(fov_facing_combo.fov),
1903                      primary_camera_id, float(primary_camera_fov))
1904        return True
1905    return False
1906
1907  def get_facing_to_ids(self):
1908    """Returns mapping from lens facing to list of corresponding camera IDs."""
1909    id_to_props = self._camera_id_to_props()
1910    facing_to_ids = collections.defaultdict(list)
1911    for unparsed_id, props in id_to_props.items():
1912      facing_to_ids[props['android.lens.facing']].append(unparsed_id)
1913    for ids in facing_to_ids.values():
1914      ids.sort()
1915    logging.debug('Facing to camera IDs: %s', facing_to_ids)
1916    return facing_to_ids
1917
1918
def parse_camera_ids(ids):
  """Parse the string of camera IDs into array of CameraIdCombo tuples.

  Args:
   ids: List of camera ids.

  Returns:
   Array of CameraIdCombo
  """
  camera_id_combo = collections.namedtuple('CameraIdCombo', ['id', 'sub_id'])
  id_combos = []
  for one_id in ids:
    parts = one_id.split(SUB_CAMERA_SEPARATOR)
    if len(parts) > 2:
      raise AssertionError('Camera id parameters must be either ID or '
                           f'ID{SUB_CAMERA_SEPARATOR}SUB_ID')
    # A lone ID gets sub_id=None; an ID.SUB_ID pair keeps both parts.
    sub_id = parts[1] if len(parts) == 2 else None
    id_combos.append(camera_id_combo(parts[0], sub_id))
  return id_combos
1940
1941
1942def _run(cmd):
1943  """Replacement for os.system, with hiding of stdout+stderr messages.
1944
1945  Args:
1946    cmd: Command to be executed in string format.
1947  """
1948  with open(os.devnull, 'wb') as devnull:
1949    subprocess.check_call(cmd.split(), stdout=devnull, stderr=subprocess.STDOUT)
1950
1951
def do_capture_with_latency(cam, req, sync_latency, fmt=None):
  """Helper function to take enough frames to allow sync latency.

  Captures sync_latency+1 frames with the same request and keeps only the
  final, settled frame.

  Args:
    cam: camera object
    req: request for camera
    sync_latency: integer number of frames
    fmt: format for the capture
  Returns:
    single capture with the unsettled frames discarded
  """
  num_frames = sync_latency + 1
  caps = cam.do_capture([req] * num_frames, fmt)
  return caps[-1]
1965
1966
def load_scene(cam, props, scene, tablet, chart_distance, lighting_check=True,
               log_path=None):
  """Load the scene for the camera based on the FOV.

  Args:
    cam: camera object
    props: camera properties
    scene: scene to be loaded
    tablet: tablet to load scene on
    chart_distance: distance to tablet
    lighting_check: Boolean for lighting check enabled
    log_path: [Optional] path to store artifacts
  """
  if not tablet:
    logging.info('Manual run: no tablet to load scene on.')
    return
  # Calculate camera_fov which will determine the image to load on tablet.
  camera_fov = cam.calc_camera_fov(props)
  file_name = cam.get_file_name_to_load(chart_distance, camera_fov, scene)
  # Add the 'scene' prefix when the generated name doesn't already carry it.
  if 'scene' not in file_name:
    file_name = f'scene{file_name}'
  logging.debug('Displaying %s on the tablet', file_name)

  # Display the scene on the tablet depending on camera_fov
  tablet.adb.shell(
      'am start -a android.intent.action.VIEW -t image/png '
      f'-d file://mnt/sdcard/Download/{file_name}')
  # Give the tablet time to render the scene before any capture below.
  time.sleep(LOAD_SCENE_DELAY_SEC)
  # Lighting is only checked when the chart distance matches the box that
  # the camera's FoV class belongs in (RFOV camera in RFOV box, or WFOV
  # camera in WFOV box).
  rfov_camera_in_rfov_box = (
      math.isclose(
          chart_distance,
          opencv_processing_utils.CHART_DISTANCE_RFOV, rel_tol=0.1) and
      opencv_processing_utils.FOV_THRESH_TELE <= float(camera_fov)
      <= opencv_processing_utils.FOV_THRESH_WFOV)
  wfov_camera_in_wfov_box = (
      math.isclose(
          chart_distance,
          opencv_processing_utils.CHART_DISTANCE_WFOV, rel_tol=0.1) and
      float(camera_fov) > opencv_processing_utils.FOV_THRESH_WFOV)
  if (rfov_camera_in_rfov_box or wfov_camera_in_wfov_box) and lighting_check:
    # Converge 3A, grab a YUV frame, and validate corner brightness.
    cam.do_3a()
    cap = cam.do_capture(
        capture_request_utils.auto_capture_request(), cam.CAP_YUV)
    y_plane, _, _ = image_processing_utils.convert_capture_to_planes(cap)
    validate_lighting(y_plane, scene, log_path=log_path)
2012
2013
def validate_lighting(y_plane, scene, state='ON', log_path=None):
  """Validates the lighting level in scene corners based on empirical values.

  Args:
    y_plane: Y plane of YUV image
    scene: scene name
    state: string 'ON' or 'OFF'
    log_path: [Optional] path to store artifacts

  Returns:
    boolean True if lighting validated, else raise AssertionError
  """
  logging.debug('Validating lighting levels.')
  file_name = f'validate_lighting_{scene}.jpg'
  if log_path:
    file_name = os.path.join(log_path, f'validate_lighting_{scene}.jpg')

  # Test patches from each corner.
  # NOTE(review): every branch in this loop body either returns or raises,
  # so only the FIRST region in _VALIDATE_LIGHTING_REGIONS is ever
  # evaluated despite the comment above — confirm whether all corners are
  # meant to be checked before relying on this for multi-corner validation.
  for location, coordinates in _VALIDATE_LIGHTING_REGIONS.items():
    patch = image_processing_utils.get_image_patch(
        y_plane, coordinates[0], coordinates[1],
        _VALIDATE_LIGHTING_PATCH_W, _VALIDATE_LIGHTING_PATCH_H)
    y_mean = image_processing_utils.compute_image_means(patch)[0]
    logging.debug('%s corner Y mean: %.3f', location, y_mean)
    if state == 'ON':
      if y_mean > _VALIDATE_LIGHTING_THRESH:
        logging.debug('Lights ON in test rig.')
        return True
      else:
        # Save the Y plane for debugging before aborting.
        image_processing_utils.write_image(y_plane, file_name)
        raise AssertionError('Lights OFF in test rig. Turn ON and retry.')
    elif state == 'OFF':
      if y_mean < _VALIDATE_LIGHTING_THRESH:
        logging.debug('Lights OFF in test rig.')
        return True
      else:
        # Save the Y plane for debugging before aborting.
        image_processing_utils.write_image(y_plane, file_name)
        raise AssertionError('Lights ON in test rig. Turn OFF and retry.')
    else:
      raise AssertionError('Invalid lighting state string. '
                           "Valid strings: 'ON', 'OFF'.")
2055
2056
def get_build_sdk_version(device_id):
  """Return the int build version of the device."""
  cmd = f'adb -s {device_id} shell getprop ro.build.version.sdk'
  try:
    raw_version = subprocess.check_output(cmd.split()).rstrip()
    build_sdk_version = int(raw_version)
  except (subprocess.CalledProcessError, ValueError) as exp_errors:
    raise AssertionError('No build_sdk_version.') from exp_errors
  logging.debug('Build SDK version: %d', build_sdk_version)
  return build_sdk_version
2066
2067
def get_first_api_level(device_id):
  """Return the int value for the first API level of the device."""
  cmd = f'adb -s {device_id} shell getprop ro.product.first_api_level'
  try:
    first_api_level = int(subprocess.check_output(cmd.split()).rstrip())
  except (subprocess.CalledProcessError, ValueError):
    # Property missing or unreadable: fall back to the build SDK version.
    logging.error('No first_api_level. Setting to build version.')
    return get_build_sdk_version(device_id)
  logging.debug('First API level: %d', first_api_level)
  return first_api_level
2078
2079
def get_vendor_api_level(device_id):
  """Return the int value for the vendor API level of the device."""
  cmd = f'adb -s {device_id} shell getprop ro.vendor.api_level'
  try:
    vendor_api_level = int(subprocess.check_output(cmd.split()).rstrip())
    # Fixed copy-paste from get_first_api_level: this previously logged
    # 'First vendor API level'.
    logging.debug('Vendor API level: %d', vendor_api_level)
  except (subprocess.CalledProcessError, ValueError):
    # Property missing or unreadable: fall back to the build SDK version.
    logging.error('No vendor_api_level. Setting to build version.')
    vendor_api_level = get_build_sdk_version(device_id)
  return vendor_api_level
2090
2091
def get_media_performance_class(device_id):
  """Return the int value for the media performance class of the device."""
  cmd = (f'adb -s {device_id} shell '
         'getprop ro.odm.build.media_performance_class')
  try:
    raw_mpc = subprocess.check_output(cmd.split()).rstrip()
    media_performance_class = int(raw_mpc)
  except (subprocess.CalledProcessError, ValueError):
    # Property missing or empty: treat the device as having no MPC.
    logging.debug('No media performance class. Setting to 0.')
    return 0
  logging.debug('Media performance class: %d', media_performance_class)
  return media_performance_class
2104
2105
def raise_mpc_assertion_error(required_mpc, test_name, found_mpc):
  """Raise an AssertionError for an unmet media performance class requirement.

  Args:
    required_mpc: int; minimum media performance class required by the test.
    test_name: str; name of the test that must be run.
    found_mpc: int; media performance class found on the device.

  Raises:
    AssertionError: always.
  """
  msg = (f'With MPC >= {required_mpc}, {test_name} must be run. '
         f'Found MPC: {found_mpc}')
  raise AssertionError(msg)
2109