• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2013 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14"""Utility functions to form an ItsSession and perform various camera actions.
15"""
16
17
18import collections
19import json
20import logging
21import math
22import os
23import socket
24import subprocess
25import sys
26import time
27import unicodedata
28import unittest
29
30import numpy
31
32import camera_properties_utils
33import capture_request_utils
34import error_util
35import image_processing_utils
36import opencv_processing_utils
37
# API level and scene-load timing used by the test harness.
ANDROID13_API_LEVEL = 33
LOAD_SCENE_DELAY_SEC = 3
# Separator between logical and physical IDs in 'logical.physical' camera IDs.
SUB_CAMERA_SEPARATOR = '.'
DEFAULT_TABLET_BRIGHTNESS = 192  # 8-bit tablet 75% brightness
ELEVEN_BIT_TABLET_BRIGHTNESS = 1536
ELEVEN_BIT_TABLET_NAMES = ('nabu',)
LEGACY_TABLET_BRIGHTNESS = 96
LEGACY_TABLET_NAME = 'dragon'
TABLET_REQUIREMENTS_URL = 'https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-requirements'
BRIGHTNESS_ERROR_MSG = ('Tablet brightness not set as per '
                        f'{TABLET_REQUIREMENTS_URL} in the config file')

# Patch size (as a fraction of image dimensions) and corner anchor points used
# to sample the image corners when validating scene lighting.
_VALIDATE_LIGHTING_PATCH_H = 0.05
_VALIDATE_LIGHTING_PATCH_W = 0.05
_VALIDATE_LIGHTING_REGIONS = {
    'top-left': (0, 0),
    'top-right': (0, 1-_VALIDATE_LIGHTING_PATCH_H),
    'bottom-left': (1-_VALIDATE_LIGHTING_PATCH_W, 0),
    'bottom-right': (1-_VALIDATE_LIGHTING_PATCH_W,
                     1-_VALIDATE_LIGHTING_PATCH_H),
}
_VALIDATE_LIGHTING_THRESH = 0.05  # Determined empirically from scene[1:6] tests
60
61
def validate_tablet_brightness(tablet_name, brightness):
  """Ensures tablet brightness is set according to documentation.

  https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-requirements
  Args:
    tablet_name: tablet product name specified by `ro.build.product`.
    brightness: brightness specified by config file.
  Raises:
    AssertionError: if brightness does not match the documented value for
      this tablet model.
  """
  name_to_brightness = {
      LEGACY_TABLET_NAME: LEGACY_TABLET_BRIGHTNESS,
  }
  for name in ELEVEN_BIT_TABLET_NAMES:
    name_to_brightness[name] = ELEVEN_BIT_TABLET_BRIGHTNESS
  # Tablets without a model-specific entry must use the default brightness.
  expected = name_to_brightness.get(tablet_name, DEFAULT_TABLET_BRIGHTNESS)
  if brightness != expected:
    raise AssertionError(BRIGHTNESS_ERROR_MSG)
81
82
class ItsSession(object):
  """Controls a device over adb to run ITS scripts.

    The script importing this module (on the host machine) prepares JSON
    objects encoding CaptureRequests, specifying sets of parameters to use
    when capturing an image using the Camera2 APIs. This class encapsulates
    sending the requests to the device, monitoring the device's progress, and
    copying the resultant captures back to the host machine when done. TCP
    forwarded over adb is the transport mechanism used.

    The device must have CtsVerifier.apk installed.

    Attributes:
        sock: The open socket.
  """

  # Open a connection to localhost:<host_port>, forwarded to port 6000 on the
  # device. <host_port> is determined at run-time to support multiple
  # connected devices.
  IPADDR = '127.0.0.1'
  REMOTE_PORT = 6000
  BUFFER_SIZE = 4096

  # LOCK_PORT is used as a mutex lock to protect the list of forwarded ports
  # among all processes. The script assumes LOCK_PORT is available and will
  # try to use ports between CLIENT_PORT_START and
  # CLIENT_PORT_START+MAX_NUM_PORTS-1 on host for ITS sessions.
  CLIENT_PORT_START = 6000
  MAX_NUM_PORTS = 100
  LOCK_PORT = CLIENT_PORT_START + MAX_NUM_PORTS

  # Seconds timeout on each socket operation.
  SOCK_TIMEOUT = 20.0
  # Seconds timeout on performance measurement socket operation
  SOCK_TIMEOUT_FOR_PERF_MEASURE = 40.0

  # Additional timeout in seconds when ITS service is doing more complicated
  # operations, for example: issuing warmup requests before actual capture.
  EXTRA_SOCK_TIMEOUT = 5.0

  # Package and intent used to start the ItsService on the device.
  PACKAGE = 'com.android.cts.verifier.camera.its'
  INTENT_START = 'com.android.cts.verifier.camera.its.START'

  # This string must be in sync with ItsService. Updated when interface
  # between script and ItsService is changed.
  ITS_SERVICE_VERSION = '1.0'

  SEC_TO_NSEC = 1000*1000*1000.0
  # Default adb invocation; overwritten per-instance in __init__ with
  # 'adb -s <device_id>'.
  adb = 'adb -d'

  # Predefine camera props. Save props extracted from the function,
  # "get_camera_properties".
  props = None

  # Socket tags (format name + 'Image' suffix) recognized as image payloads.
  IMAGE_FORMAT_LIST_1 = [
      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
      'dngImage', 'y8Image'
  ]

  # Variant list used when matching physical sub-camera image tags.
  IMAGE_FORMAT_LIST_2 = [
      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
      'yuvImage'
  ]

  # Convenience capture-request format dicts.
  CAP_JPEG = {'format': 'jpeg'}
  CAP_RAW = {'format': 'raw'}
  CAP_YUV = {'format': 'yuv'}
  CAP_RAW_YUV = [{'format': 'raw'}, {'format': 'yuv'}]
151
  def __init_socket_port(self):
    """Initialize the socket port for the host to forward requests to the device.

    This method assumes localhost's LOCK_PORT is available and will try to
    use ports between CLIENT_PORT_START and CLIENT_PORT_START+MAX_NUM_PORTS-1
    """
    num_retries = 100
    retry_wait_time_sec = 0.05

    # Bind a socket to use as mutex lock: only one process at a time can hold
    # LOCK_PORT, which serializes access to the adb forward table.
    socket_lock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    for i in range(num_retries):
      try:
        socket_lock.bind((ItsSession.IPADDR, ItsSession.LOCK_PORT))
        break
      except (socket.error, socket.timeout) as socket_issue:
        if i == num_retries - 1:
          raise error_util.CameraItsError(
              self._device_id, 'socket lock returns error') from socket_issue
        else:
          time.sleep(retry_wait_time_sec)

    # Check if a port is already assigned to the device.
    command = 'adb forward --list'
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    # pylint: disable=unused-variable
    output, error = proc.communicate()
    port = None
    used_ports = []
    # NOTE(review): splitting on os.linesep assumes adb uses the host's native
    # line separator; on Windows adb may emit '\n' only -- TODO confirm.
    for line in output.decode('utf-8').split(os.linesep):
      # each line should be formatted as:
      # "<device_id> tcp:<host_port> tcp:<remote_port>"
      forward_info = line.split()
      if len(forward_info) >= 3 and len(
          forward_info[1]) > 4 and forward_info[1][:4] == 'tcp:' and len(
              forward_info[2]) > 4 and forward_info[2][:4] == 'tcp:':
        local_p = int(forward_info[1][4:])
        remote_p = int(forward_info[2][4:])
        if forward_info[
            0] == self._device_id and remote_p == ItsSession.REMOTE_PORT:
          # Reuse the forwarding rule already assigned to this device.
          port = local_p
          break
        else:
          used_ports.append(local_p)

      # Find the first available port if no port is assigned to the device.
    if port is None:
      for p in range(ItsSession.CLIENT_PORT_START,
                     ItsSession.CLIENT_PORT_START + ItsSession.MAX_NUM_PORTS):
        if self.check_port_availability(p, used_ports):
          port = p
          break

    if port is None:
      raise error_util.CameraItsError(self._device_id,
                                      ' cannot find an available ' + 'port')

    # Release the socket as mutex unlock
    socket_lock.close()

    # Connect to the socket
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.connect((self.IPADDR, port))
    self.sock.settimeout(self.SOCK_TIMEOUT)
216
217  def check_port_availability(self, check_port, used_ports):
218    """Check if the port is available or not.
219
220    Args:
221      check_port: Port to check for availability
222      used_ports: List of used ports
223
224    Returns:
225     True if the given port is available and can be assigned to the device.
226    """
227    if check_port not in used_ports:
228      # Try to run "adb forward" with the port
229      command = ('%s forward tcp:%d tcp:%d' %
230                 (self.adb, check_port, self.REMOTE_PORT))
231      proc = subprocess.Popen(
232          command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
233      error = proc.communicate()[1]
234
235      # Check if there is no error
236      if error is None or error.find('error'.encode()) < 0:
237        return True
238      else:
239        return False
240
  def __wait_for_service(self):
    """Wait for ItsService to be ready and reboot the device if needed.

    This also includes the optional reboot handling: if the user
    provides a "reboot" or "reboot=N" arg, then reboot the device,
    waiting for N seconds (default 30) before returning.
    """

    # Scan command-line args for an optional reboot request.
    for s in sys.argv[1:]:
      if s[:6] == 'reboot':
        duration = 30
        if len(s) > 7 and s[6] == '=':
          duration = int(s[7:])
        logging.debug('Rebooting device')
        _run('%s reboot' % (self.adb))
        _run('%s wait-for-device' % (self.adb))
        time.sleep(duration)
        logging.debug('Reboot complete')

    # Flush logcat so following code won't be misled by previous
    # 'ItsService ready' log.
    _run('%s logcat -c' % (self.adb))
    time.sleep(1)

    # Restart the ITS foreground service from a clean state.
    _run('%s shell am force-stop --user 0 %s' % (self.adb, self.PACKAGE))
    _run(('%s shell am start-foreground-service --user 0 -t text/plain '
          '-a %s') % (self.adb, self.INTENT_START))

    # Wait until the socket is ready to accept a connection.
    # NOTE(review): this loop blocks forever if the service never logs
    # 'ItsService ready'; there is no timeout -- TODO confirm intended.
    proc = subprocess.Popen(
        self.adb.split() + ['logcat'], stdout=subprocess.PIPE)
    logcat = proc.stdout
    while True:
      line = logcat.readline().strip()
      if line.find(b'ItsService ready') >= 0:
        break
    proc.kill()
    proc.communicate()
279
  def __init__(self, device_id=None, camera_id=None, hidden_physical_id=None):
    """Creates a session: starts ItsService on the device and connects a socket.

    Args:
      device_id: serial number of the device to target with 'adb -s'.
        NOTE(review): the default None would raise a TypeError in the string
        concatenation below -- callers appear expected to always pass one.
      camera_id: camera to open; if None it is parsed from sys.argv on open.
      hidden_physical_id: optional physical sub-camera id of a logical camera.
    """
    self._camera_id = camera_id
    self._device_id = device_id
    self._hidden_physical_id = hidden_physical_id

    # Initialize device id and adb command.
    self.adb = 'adb -s ' + self._device_id
    self.__wait_for_service()
    self.__init_socket_port()
289
  def __enter__(self):
    # Close any previously open camera first so the open starts clean.
    self.__close_camera()
    self.__open_camera()
    return self
294
  def __exit__(self, exec_type, exec_value, exec_traceback):
    # Close the camera and socket on exit if the socket was ever opened.
    # Returning False lets any exception from the with-block propagate.
    if hasattr(self, 'sock') and self.sock:
      self.__close_camera()
      self.sock.close()
    return False
300
301  def override_with_hidden_physical_camera_props(self, props):
302    """Check that it is a valid sub-camera backing the logical camera.
303
304    If current session is for a hidden physical camera, check that it is a valid
305    sub-camera backing the logical camera, override self.props, and return the
306    characteristics of sub-camera. Otherwise, return "props" directly.
307
308    Args:
309     props: Camera properties object.
310
311    Returns:
312     The properties of the hidden physical camera if possible.
313    """
314    if self._hidden_physical_id:
315      if not camera_properties_utils.logical_multi_camera(props):
316        raise AssertionError(f'{self._camera_id} is not a logical multi-camera')
317      physical_ids = camera_properties_utils.logical_multi_camera_physical_ids(
318          props)
319      if self._hidden_physical_id not in physical_ids:
320        raise AssertionError(f'{self._hidden_physical_id} is not a hidden '
321                             f'sub-camera of {self._camera_id}')
322      props = self.get_camera_properties_by_id(self._hidden_physical_id)
323      self.props = props
324    return props
325
326  def get_camera_properties(self):
327    """Get the camera properties object for the device.
328
329    Returns:
330     The Python dictionary object for the CameraProperties object.
331    """
332    cmd = {}
333    cmd['cmdName'] = 'getCameraProperties'
334    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
335    data, _ = self.__read_response_from_socket()
336    if data['tag'] != 'cameraProperties':
337      raise error_util.CameraItsError('Invalid command response')
338    self.props = data['objValue']['cameraProperties']
339    return data['objValue']['cameraProperties']
340
341  def get_camera_properties_by_id(self, camera_id):
342    """Get the camera properties object for device with camera_id.
343
344    Args:
345     camera_id: The ID string of the camera
346
347    Returns:
348     The Python dictionary object for the CameraProperties object. Empty
349     if no such device exists.
350    """
351    cmd = {}
352    cmd['cmdName'] = 'getCameraPropertiesById'
353    cmd['cameraId'] = camera_id
354    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
355    data, _ = self.__read_response_from_socket()
356    if data['tag'] != 'cameraProperties':
357      raise error_util.CameraItsError('Invalid command response')
358    return data['objValue']['cameraProperties']
359
  def __read_response_from_socket(self):
    """Reads a line (newline-terminated) string serialization of JSON object.

    Returns:
     Deserialized json obj.
    """
    # Read one byte at a time until the newline terminator; an empty recv
    # means the peer closed the connection.
    chars = []
    while not chars or chars[-1] != '\n':
      ch = self.sock.recv(1).decode('utf-8')
      if not ch:
        # Socket was probably closed; otherwise don't get empty strings
        raise error_util.CameraItsError('Problem with socket on device side')
      chars.append(ch)
    line = ''.join(chars)
    jobj = json.loads(line)
    # Optionally read a binary buffer of a fixed size.
    buf = None
    if 'bufValueSize' in jobj:
      n = jobj['bufValueSize']
      buf = bytearray(n)
      view = memoryview(buf)
      # recv_into may return fewer bytes than requested; advance the view
      # until the full buffer has been filled.
      while n > 0:
        nbytes = self.sock.recv_into(view, n)
        view = view[nbytes:]
        n -= nbytes
      buf = numpy.frombuffer(buf, dtype=numpy.uint8)
    return jobj, buf
387
388  def __open_camera(self):
389    """Get the camera ID to open if it is an argument as a single camera.
390
391    This allows passing camera=# to individual tests at command line
392    and camera=#,#,# or an no camera argv with tools/run_all_tests.py.
393    In case the camera is a logical multi-camera, to run ITS on the
394    hidden physical sub-camera, pass camera=[logical ID]:[physical ID]
395    to an individual test at the command line, and same applies to multiple
396    camera IDs for tools/run_all_tests.py: camera=#,#:#,#:#,#
397    """
398    if not self._camera_id:
399      self._camera_id = 0
400      for s in sys.argv[1:]:
401        if s[:7] == 'camera=' and len(s) > 7:
402          camera_ids = s[7:].split(',')
403          camera_id_combos = parse_camera_ids(camera_ids)
404          if len(camera_id_combos) == 1:
405            self._camera_id = camera_id_combos[0].id
406            self._hidden_physical_id = camera_id_combos[0].sub_id
407
408    logging.debug('Opening camera: %s', self._camera_id)
409    cmd = {'cmdName': 'open', 'cameraId': self._camera_id}
410    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
411    data, _ = self.__read_response_from_socket()
412    if data['tag'] != 'cameraOpened':
413      raise error_util.CameraItsError('Invalid command response')
414
  def __close_camera(self):
    """Closes the currently open camera on the device via ItsService."""
    cmd = {'cmdName': 'close'}
    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
    data, _ = self.__read_response_from_socket()
    if data['tag'] != 'cameraClosed':
      raise error_util.CameraItsError('Invalid command response')
421
422  def get_sensors(self):
423    """Get all sensors on the device.
424
425    Returns:
426       A Python dictionary that returns keys and booleans for each sensor.
427    """
428    cmd = {}
429    cmd['cmdName'] = 'checkSensorExistence'
430    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
431    data, _ = self.__read_response_from_socket()
432    if data['tag'] != 'sensorExistence':
433      raise error_util.CameraItsError('Invalid response for command: %s' %
434                                      cmd['cmdName'])
435    return data['objValue']
436
437  def start_sensor_events(self):
438    """Start collecting sensor events on the device.
439
440    See get_sensor_events for more info.
441
442    Returns:
443       Nothing.
444    """
445    cmd = {}
446    cmd['cmdName'] = 'startSensorEvents'
447    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
448    data, _ = self.__read_response_from_socket()
449    if data['tag'] != 'sensorEventsStarted':
450      raise error_util.CameraItsError('Invalid response for command: %s' %
451                                      cmd['cmdName'])
452
453  def get_sensor_events(self):
454    """Get a trace of all sensor events on the device.
455
456        The trace starts when the start_sensor_events function is called. If
457        the test runs for a long time after this call, then the device's
458        internal memory can fill up. Calling get_sensor_events gets all events
459        from the device, and then stops the device from collecting events and
460        clears the internal buffer; to start again, the start_sensor_events
461        call must be used again.
462
463        Events from the accelerometer, compass, and gyro are returned; each
464        has a timestamp and x,y,z values.
465
466        Note that sensor events are only produced if the device isn't in its
467        standby mode (i.e.) if the screen is on.
468
469    Returns:
470            A Python dictionary with three keys ("accel", "mag", "gyro") each
471            of which maps to a list of objects containing "time","x","y","z"
472            keys.
473    """
474    cmd = {}
475    cmd['cmdName'] = 'getSensorEvents'
476    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
477    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
478    self.sock.settimeout(timeout)
479    data, _ = self.__read_response_from_socket()
480    if data['tag'] != 'sensorEvents':
481      raise error_util.CameraItsError('Invalid response for command: %s ' %
482                                      cmd['cmdName'])
483    self.sock.settimeout(self.SOCK_TIMEOUT)
484    return data['objValue']
485
486  def get_camera_ids(self):
487    """Returns the list of all camera_ids.
488
489    Returns:
490      List of camera ids on the device.
491    """
492    cmd = {'cmdName': 'getCameraIds'}
493    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
494    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
495    self.sock.settimeout(timeout)
496    data, _ = self.__read_response_from_socket()
497    if data['tag'] != 'cameraIds':
498      raise error_util.CameraItsError('Invalid command response')
499    return data['objValue']
500
501  def get_unavailable_physical_cameras(self, camera_id):
502    """Get the unavailable physical cameras ids.
503
504    Args:
505      camera_id: int; device id
506    Returns:
507      List of all physical camera ids which are unavailable.
508    """
509    cmd = {'cmdName': 'doGetUnavailablePhysicalCameras',
510           'cameraId': camera_id}
511    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
512    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
513    self.sock.settimeout(timeout)
514    data, _ = self.__read_response_from_socket()
515    if data['tag'] != 'unavailablePhysicalCameras':
516      raise error_util.CameraItsError('Invalid command response')
517    return data['objValue']
518
519  def is_hlg10_recording_supported(self, profile_id):
520    """Query whether the camera device supports HLG10 video recording.
521
522    Args:
523      profile_id: int; profile id corresponding to the quality level.
524    Returns:
525      Boolean: True, if device supports HLG10 video recording, False in
526      all other cases.
527    """
528    cmd = {}
529    cmd['cmdName'] = 'isHLG10Supported'
530    cmd['cameraId'] = self._camera_id
531    cmd['profileId'] = profile_id
532    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
533
534    data, _ = self.__read_response_from_socket()
535    if data['tag'] != 'hlg10Response':
536      raise error_util.CameraItsError('Failed to query HLG10 support')
537    return data['strValue'] == 'true'
538
539  def do_basic_recording(self, profile_id, quality, duration,
540                         video_stabilization_mode=0, hlg10_enabled=False):
541    """Issue a recording request and read back the video recording object.
542
543    The recording will be done with the format specified in quality. These
544    quality levels correspond to the profiles listed in CamcorderProfile.
545    The duration is the time in seconds for which the video will be recorded.
546    The recorded object consists of a path on the device at which the
547    recorded video is saved.
548
549    Args:
550      profile_id: int; profile id corresponding to the quality level.
551      quality: Video recording quality such as High, Low, VGA.
552      duration: The time in seconds for which the video will be recorded.
553      video_stabilization_mode: Video stabilization mode ON/OFF. Value can be
554      0: 'OFF', 1: 'ON', 2: 'PREVIEW'
555      hlg10_enabled: boolean: True Enable 10-bit HLG video recording, False
556      record using the regular SDR profile
557    Returns:
558      video_recorded_object: The recorded object returned from ItsService which
559      contains path at which the recording is saved on the device, quality of
560      the recorded video, video size of the recorded video, video frame rate
561      and 'hlg10' if 'hlg10_enabled' is set to True.
562      Ex:
563      VideoRecordingObject: {
564        'tag': 'recordingResponse',
565        'objValue': {
566          'recordedOutputPath':
567            '/storage/emulated/0/Android/data/com.android.cts.verifier'
568            '/files/VideoITS/VID_20220324_080414_0_CIF_352x288.mp4',
569          'quality': 'CIF',
570          'videoFrameRate': 30,
571          'videoSize': '352x288'
572        }
573      }
574    """
575    cmd = {'cmdName': 'doBasicRecording', 'cameraId': self._camera_id,
576           'profileId': profile_id, 'quality': quality,
577           'recordingDuration': duration,
578           'videoStabilizationMode': video_stabilization_mode,
579           'hlg10Enabled': hlg10_enabled}
580    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
581    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
582    self.sock.settimeout(timeout)
583    data, _ = self.__read_response_from_socket()
584    if data['tag'] != 'recordingResponse':
585      raise error_util.CameraItsError(
586          f'Invalid response for command: {cmd["cmdName"]}')
587    logging.debug('VideoRecordingObject: %s', data)
588    return data['objValue']
589
590  def do_preview_recording(self, video_size, duration, stabilize):
591    """Issue a preview request and read back the preview recording object.
592
593    The resolution of the preview and its recording will be determined by
594    video_size. The duration is the time in seconds for which the preview will
595    be recorded. The recorded object consists of a path on the device at
596    which the recorded video is saved.
597
598    Args:
599      video_size: str; Preview resolution at which to record. ex. "1920x1080"
600      duration: int; The time in seconds for which the video will be recorded.
601      stabilize: boolean; Whether the preview should be stabilized or not
602    Returns:
603      video_recorded_object: The recorded object returned from ItsService which
604      contains path at which the recording is saved on the device, quality of
605      the recorded video which is always set to "preview", video size of the
606      recorded video, video frame rate.
607      Ex:
608      VideoRecordingObject: {
609        'tag': 'recordingResponse',
610        'objValue': {
611          'recordedOutputPath': '/storage/emulated/0/Android/data/'
612                                'com.android.cts.verifier/files/VideoITS/'
613                                'VID_20220324_080414_0_CIF_352x288.mp4',
614          'quality': 'preview',
615          'videoSize': '352x288'
616        }
617      }
618    """
619
620    cmd = {
621        'cmdName': 'doPreviewRecording',
622        'cameraId': self._camera_id,
623        'videoSize': video_size,
624        'recordingDuration': duration,
625        'stabilize': stabilize
626    }
627    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
628    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
629    self.sock.settimeout(timeout)
630
631    data, _ = self.__read_response_from_socket()
632    logging.debug('VideoRecordingObject: %s', str(data))
633    if data['tag'] != 'recordingResponse':
634      raise error_util.CameraItsError(
635          f'Invalid response from command{cmd["cmdName"]}')
636    return data['objValue']
637
638  def get_supported_video_qualities(self, camera_id):
639    """Get all supported video qualities for this camera device.
640
641    ie. ['480:4', '1080:6', '2160:8', '720:5', 'CIF:3', 'HIGH:1', 'LOW:0',
642         'QCIF:2', 'QVGA:7']
643
644    Args:
645      camera_id: device id
646    Returns:
647      List of all supported video qualities and corresponding profileIds.
648    """
649    cmd = {}
650    cmd['cmdName'] = 'getSupportedVideoQualities'
651    cmd['cameraId'] = camera_id
652    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
653    data, _ = self.__read_response_from_socket()
654    if data['tag'] != 'supportedVideoQualities':
655      raise error_util.CameraItsError('Invalid command response')
656    return data['strValue'].split(';')[:-1]  # remove the last appended ';'
657
658  def get_supported_preview_sizes(self, camera_id):
659    """Get all supported preview resolutions for this camera device.
660
661    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
662
663    Args:
664      camera_id: int; device id
665    Returns:
666      List of all supported video resolutions in ascending order.
667    """
668    cmd = {
669        'cmdName': 'getSupportedPreviewSizes',
670        'cameraId': camera_id
671    }
672    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
673    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
674    self.sock.settimeout(timeout)
675    data, _ = self.__read_response_from_socket()
676    if data['tag'] != 'supportedPreviewSizes':
677      raise error_util.CameraItsError('Invalid command response')
678    if not data['strValue']:
679      raise error_util.CameraItsError('No supported preview sizes')
680    return data['strValue'].split(';')
681
682  def get_display_size(self):
683    """ Get the display size of the screen.
684
685    Returns:
686      The size of the display resolution in pixels.
687    """
688    cmd = {
689        'cmdName': 'getDisplaySize'
690    }
691    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
692    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
693    self.sock.settimeout(timeout)
694    data, _ = self.__read_response_from_socket()
695    if data['tag'] != 'displaySize':
696      raise error_util.CameraItsError('Invalid command response')
697    if not data['strValue']:
698      raise error_util.CameraItsError('No display size')
699    return data['strValue'].split('x')
700
701  def get_max_camcorder_profile_size(self, camera_id):
702    """ Get the maximum camcorder profile size for this camera device.
703
704    Args:
705      camera_id: int; device id
706    Returns:
707      The maximum size among all camcorder profiles supported by this camera.
708    """
709    cmd = {
710        'cmdName': 'getMaxCamcorderProfileSize',
711        'cameraId': camera_id
712    }
713    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
714    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
715    self.sock.settimeout(timeout)
716    data, _ = self.__read_response_from_socket()
717    if data['tag'] != 'maxCamcorderProfileSize':
718      raise error_util.CameraItsError('Invalid command response')
719    if not data['strValue']:
720      raise error_util.CameraItsError('No max camcorder profile size')
721    return data['strValue'].split('x')
722
723  def do_capture_with_flash(self,
724                            preview_request_start,
725                            preview_request_idle,
726                            still_capture_req,
727                            out_surface):
728    """Issue capture request with flash and read back the image and metadata.
729
730    Captures a single image with still_capture_req as capture request
731    with flash. It triggers the precapture sequence with preview request
732    preview_request_start with capture intent preview by setting aePrecapture
733    trigger to Start. This is followed by repeated preview requests
734    preview_request_idle with aePrecaptureTrigger set to IDLE.
735    Once the AE is converged, a single image is captured still_capture_req
736    during which the flash must be fired.
737    Note: The part where we read output data from socket is cloned from
738    do_capture and will be consolidated in U.
739
740    Args:
741      preview_request_start: Preview request with aePrecaptureTrigger set to
742        Start
743      preview_request_idle: Preview request with aePrecaptureTrigger set to Idle
744      still_capture_req: Single still capture request.
745      out_surface: Specifications of the output image formats and
746        sizes to use for capture.
747    Returns:
748      An object which contains following fields:
749      * data: the image data as a numpy array of bytes.
750      * width: the width of the captured image.
751      * height: the height of the captured image.
752      * format: image format
753      * metadata: the capture result object
754    """
755    cmd = {}
756    cmd['cmdName'] = 'doCaptureWithFlash'
757    cmd['previewRequestStart'] = [preview_request_start]
758    cmd['previewRequestIdle'] = [preview_request_idle]
759    cmd['stillCaptureRequest'] = [still_capture_req]
760    cmd['outputSurfaces'] = [out_surface]
761
762    cam_ids = self._camera_id
763    self.sock.settimeout(self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT)
764    logging.debug('Capturing image with ON_AUTO_FLASH.')
765    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
766
767    bufs = {}
768    bufs[self._camera_id] = {'jpeg': []}
769    formats = ['jpeg']
770    rets = []
771    nbufs = 0
772    mds = []
773    physical_mds = []
774    widths = None
775    heights = None
776    ncap = 1
777    capture_results_returned = False
778    yuv_bufs = {}
779    while (nbufs < ncap) or (not capture_results_returned):
780      json_obj, buf = self.__read_response_from_socket()
781      if json_obj['tag'] in ItsSession.IMAGE_FORMAT_LIST_1 and buf is not None:
782        fmt = json_obj['tag'][:-5]
783        bufs[self._camera_id][fmt].append(buf)
784        nbufs += 1
785      elif json_obj['tag'] == 'captureResults':
786        capture_results_returned = True
787        mds.append(json_obj['objValue']['captureResult'])
788        physical_mds.append(json_obj['objValue']['physicalResults'])
789        outputs = json_obj['objValue']['outputs']
790        widths = [out['width'] for out in outputs]
791        heights = [out['height'] for out in outputs]
792      else:
793        tag_string = unicodedata.normalize('NFKD', json_obj['tag']).encode(
794            'ascii', 'ignore')
795        for x in ItsSession.IMAGE_FORMAT_LIST_2:
796          x = bytes(x, encoding='utf-8')
797          if tag_string.startswith(x):
798            if x == b'yuvImage':
799              physical_id = json_obj['tag'][len(x):]
800              if physical_id in cam_ids:
801                buf_size = numpy.product(buf.shape)
802                yuv_bufs[physical_id][buf_size].append(buf)
803                nbufs += 1
804            else:
805              physical_id = json_obj['tag'][len(x):]
806              if physical_id in cam_ids:
807                fmt = x[:-5].decode('UTF-8')
808                bufs[physical_id][fmt].append(buf)
809                nbufs += 1
810    rets = []
811    for j, fmt in enumerate(formats):
812      objs = []
813      if 'physicalCamera' in cmd['outputSurfaces'][j]:
814        cam_id = cmd['outputSurfaces'][j]['physicalCamera']
815      else:
816        cam_id = self._camera_id
817      for i in range(ncap):
818        obj = {}
819        obj['width'] = widths[j]
820        obj['height'] = heights[j]
821        obj['format'] = fmt
822        if cam_id == self._camera_id:
823          obj['metadata'] = mds[i]
824        else:
825          for physical_md in physical_mds[i]:
826            if cam_id in physical_md:
827              obj['metadata'] = physical_md[cam_id]
828              break
829        obj['data'] = bufs[cam_id][fmt][i]
830        objs.append(obj)
831      rets.append(objs if ncap > 1 else objs[0])
832    self.sock.settimeout(self.SOCK_TIMEOUT)
833    if len(rets) > 1 or (isinstance(rets[0], dict) and
834                         isinstance(still_capture_req, list)):
835      return rets
836    else:
837      return rets[0]
838
839  def do_capture(self,
840                 cap_request,
841                 out_surfaces=None,
842                 reprocess_format=None,
843                 repeat_request=None):
844    """Issue capture request(s), and read back the image(s) and metadata.
845
846    The main top-level function for capturing one or more images using the
847    device. Captures a single image if cap_request is a single object, and
848    captures a burst if it is a list of objects.
849
850    The optional repeat_request field can be used to assign a repeating
851    request list ran in background for 3 seconds to warm up the capturing
852    pipeline before start capturing. The repeat_requests will be ran on a
853    640x480 YUV surface without sending any data back. The caller needs to
854    make sure the stream configuration defined by out_surfaces and
855    repeat_request are valid or do_capture may fail because device does not
856    support such stream configuration.
857
858    The out_surfaces field can specify the width(s), height(s), and
859    format(s) of the captured image. The formats may be "yuv", "jpeg",
860    "dng", "raw", "raw10", "raw12", "rawStats" or "y8". The default is a
861    YUV420 frame ("yuv") corresponding to a full sensor frame.
862
863    1. Optionally the out_surfaces field can specify physical camera id(s) if
864    the current camera device is a logical multi-camera. The physical camera
865    id must refer to a physical camera backing this logical camera device.
866    2. Optionally The output_surfaces field can also specify the use case(s) if
867    the current camera device has STREAM_USE_CASE capability.
868
869    Note that one or more surfaces can be specified, allowing a capture to
870    request images back in multiple formats (e.g.) raw+yuv, raw+jpeg,
871    yuv+jpeg, raw+yuv+jpeg. If the size is omitted for a surface, the
872    default is the largest resolution available for the format of that
873    surface. At most one output surface can be specified for a given format,
874    and raw+dng, raw10+dng, and raw+raw10 are not supported as combinations.
875
876    If reprocess_format is not None, for each request, an intermediate
877    buffer of the given reprocess_format will be captured from camera and
878    the intermediate buffer will be reprocessed to the output surfaces. The
879    following settings will be turned off when capturing the intermediate
880    buffer and will be applied when reprocessing the intermediate buffer.
881    1. android.noiseReduction.mode
882    2. android.edge.mode
883    3. android.reprocess.effectiveExposureFactor
884
885    Supported reprocess format are "yuv" and "private". Supported output
886    surface formats when reprocessing is enabled are "yuv" and "jpeg".
887
888    Example of a single capture request:
889
890    {
891     "android.sensor.exposureTime": 100*1000*1000,
892     "android.sensor.sensitivity": 100
893    }
894
895    Example of a list of capture requests:
896    [
897     {
898       "android.sensor.exposureTime": 100*1000*1000,
899       "android.sensor.sensitivity": 100
900     },
901    {
902      "android.sensor.exposureTime": 100*1000*1000,
903       "android.sensor.sensitivity": 200
904     }
905    ]
906
907    Example of output surface specifications:
908    {
909     "width": 640,
910     "height": 480,
911     "format": "yuv"
912    }
913    [
914     {
915       "format": "jpeg"
916     },
917     {
918       "format": "raw"
919     }
920    ]
921
922    The following variables defined in this class are shortcuts for
923    specifying one or more formats where each output is the full size for
924    that format; they can be used as values for the out_surfaces arguments:
925
926    CAP_RAW
927    CAP_DNG
928    CAP_YUV
929    CAP_JPEG
930    CAP_RAW_YUV
931    CAP_DNG_YUV
932    CAP_RAW_JPEG
933    CAP_DNG_JPEG
934    CAP_YUV_JPEG
935    CAP_RAW_YUV_JPEG
936    CAP_DNG_YUV_JPEG
937
938    If multiple formats are specified, then this function returns multiple
939    capture objects, one for each requested format. If multiple formats and
940    multiple captures (i.e. a burst) are specified, then this function
941    returns multiple lists of capture objects. In both cases, the order of
942    the returned objects matches the order of the requested formats in the
943    out_surfaces parameter. For example:
944
945    yuv_cap = do_capture(req1)
946    yuv_cap = do_capture(req1,yuv_fmt)
947    yuv_cap, raw_cap = do_capture(req1, [yuv_fmt,raw_fmt])
948    yuv_caps = do_capture([req1,req2], yuv_fmt)
949    yuv_caps, raw_caps = do_capture([req1,req2], [yuv_fmt,raw_fmt])
950
951    The "rawStats" format processes the raw image and returns a new image
952    of statistics from the raw image. The format takes additional keys,
953    "gridWidth" and "gridHeight" which are size of grid cells in a 2D grid
954    of the raw image. For each grid cell, the mean and variance of each raw
955    channel is computed, and the do_capture call returns two 4-element float
956    images of dimensions (rawWidth / gridWidth, rawHeight / gridHeight),
957    concatenated back-to-back, where the first image contains the 4-channel
958    means and the second contains the 4-channel variances. Note that only
959    pixels in the active array crop region are used; pixels outside this
960    region (for example optical black rows) are cropped out before the
961    gridding and statistics computation is performed.
962
963    For the rawStats format, if the gridWidth is not provided then the raw
964    image width is used as the default, and similarly for gridHeight. With
965    this, the following is an example of a output description that computes
966    the mean and variance across each image row:
967    {
968      "gridHeight": 1,
969      "format": "rawStats"
970    }
971
972    Args:
973      cap_request: The Python dict/list specifying the capture(s), which will be
974        converted to JSON and sent to the device.
975      out_surfaces: (Optional) specifications of the output image formats and
976        sizes to use for each capture.
977      reprocess_format: (Optional) The reprocessing format. If not
978        None,reprocessing will be enabled.
979      repeat_request: Repeating request list.
980
981    Returns:
982      An object, list of objects, or list of lists of objects, where each
983      object contains the following fields:
984      * data: the image data as a numpy array of bytes.
985      * width: the width of the captured image.
986      * height: the height of the captured image.
987      * format: image the format, in [
988                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
989      * metadata: the capture result object (Python dictionary).
990    """
991    cmd = {}
992    if reprocess_format is not None:
993      if repeat_request is not None:
994        raise error_util.CameraItsError(
995            'repeating request + reprocessing is not supported')
996      cmd['cmdName'] = 'doReprocessCapture'
997      cmd['reprocessFormat'] = reprocess_format
998    else:
999      cmd['cmdName'] = 'doCapture'
1000
1001    if repeat_request is None:
1002      cmd['repeatRequests'] = []
1003    elif not isinstance(repeat_request, list):
1004      cmd['repeatRequests'] = [repeat_request]
1005    else:
1006      cmd['repeatRequests'] = repeat_request
1007
1008    if not isinstance(cap_request, list):
1009      cmd['captureRequests'] = [cap_request]
1010    else:
1011      cmd['captureRequests'] = cap_request
1012
1013    if out_surfaces is not None:
1014      if not isinstance(out_surfaces, list):
1015        cmd['outputSurfaces'] = [out_surfaces]
1016      else:
1017        cmd['outputSurfaces'] = out_surfaces
1018      formats = [
1019          c['format'] if 'format' in c else 'yuv' for c in cmd['outputSurfaces']
1020      ]
1021      formats = [s if s != 'jpg' else 'jpeg' for s in formats]
1022    else:
1023      max_yuv_size = capture_request_utils.get_available_output_sizes(
1024          'yuv', self.props)[0]
1025      formats = ['yuv']
1026      cmd['outputSurfaces'] = [{
1027          'format': 'yuv',
1028          'width': max_yuv_size[0],
1029          'height': max_yuv_size[1]
1030      }]
1031
1032    ncap = len(cmd['captureRequests'])
1033    nsurf = 1 if out_surfaces is None else len(cmd['outputSurfaces'])
1034
1035    cam_ids = []
1036    bufs = {}
1037    yuv_bufs = {}
1038    for i, s in enumerate(cmd['outputSurfaces']):
1039      if self._hidden_physical_id:
1040        s['physicalCamera'] = self._hidden_physical_id
1041
1042      if 'physicalCamera' in s:
1043        cam_id = s['physicalCamera']
1044      else:
1045        cam_id = self._camera_id
1046
1047      if cam_id not in cam_ids:
1048        cam_ids.append(cam_id)
1049        bufs[cam_id] = {
1050            'raw': [],
1051            'raw10': [],
1052            'raw12': [],
1053            'rawStats': [],
1054            'dng': [],
1055            'jpeg': [],
1056            'y8': []
1057        }
1058
1059    for cam_id in cam_ids:
1060       # Only allow yuv output to multiple targets
1061      if cam_id == self._camera_id:
1062        yuv_surfaces = [
1063            s for s in cmd['outputSurfaces']
1064            if s['format'] == 'yuv' and 'physicalCamera' not in s
1065        ]
1066        formats_for_id = [
1067            s['format']
1068            for s in cmd['outputSurfaces']
1069            if 'physicalCamera' not in s
1070        ]
1071      else:
1072        yuv_surfaces = [
1073            s for s in cmd['outputSurfaces'] if s['format'] == 'yuv' and
1074            'physicalCamera' in s and s['physicalCamera'] == cam_id
1075        ]
1076        formats_for_id = [
1077            s['format']
1078            for s in cmd['outputSurfaces']
1079            if 'physicalCamera' in s and s['physicalCamera'] == cam_id
1080        ]
1081
1082      n_yuv = len(yuv_surfaces)
1083      # Compute the buffer size of YUV targets
1084      yuv_maxsize_1d = 0
1085      for s in yuv_surfaces:
1086        if ('width' not in s and 'height' not in s):
1087          if self.props is None:
1088            raise error_util.CameraItsError('Camera props are unavailable')
1089          yuv_maxsize_2d = capture_request_utils.get_available_output_sizes(
1090              'yuv', self.props)[0]
1091          # YUV420 size = 1.5 bytes per pixel
1092          yuv_maxsize_1d = (yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3) // 2
1093          break
1094      yuv_sizes = [
1095          (c['width'] * c['height'] * 3) // 2
1096          if 'width' in c and 'height' in c else yuv_maxsize_1d
1097          for c in yuv_surfaces
1098      ]
1099      # Currently we don't pass enough metadta from ItsService to distinguish
1100      # different yuv stream of same buffer size
1101      if len(yuv_sizes) != len(set(yuv_sizes)):
1102        raise error_util.CameraItsError(
1103            'ITS does not support yuv outputs of same buffer size')
1104      if len(formats_for_id) > len(set(formats_for_id)):
1105        if n_yuv != len(formats_for_id) - len(set(formats_for_id)) + 1:
1106          raise error_util.CameraItsError('Duplicate format requested')
1107
1108      yuv_bufs[cam_id] = {size: [] for size in yuv_sizes}
1109
1110    raw_formats = 0
1111    raw_formats += 1 if 'dng' in formats else 0
1112    raw_formats += 1 if 'raw' in formats else 0
1113    raw_formats += 1 if 'raw10' in formats else 0
1114    raw_formats += 1 if 'raw12' in formats else 0
1115    raw_formats += 1 if 'rawStats' in formats else 0
1116    if raw_formats > 1:
1117      raise error_util.CameraItsError('Different raw formats not supported')
1118
1119    # Detect long exposure time and set timeout accordingly
1120    longest_exp_time = 0
1121    for req in cmd['captureRequests']:
1122      if 'android.sensor.exposureTime' in req and req[
1123          'android.sensor.exposureTime'] > longest_exp_time:
1124        longest_exp_time = req['android.sensor.exposureTime']
1125
1126    extended_timeout = longest_exp_time // self.SEC_TO_NSEC + self.SOCK_TIMEOUT
1127    if repeat_request:
1128      extended_timeout += self.EXTRA_SOCK_TIMEOUT
1129    self.sock.settimeout(extended_timeout)
1130
1131    logging.debug('Capturing %d frame%s with %d format%s [%s]', ncap,
1132                  's' if ncap > 1 else '', nsurf, 's' if nsurf > 1 else '',
1133                  ','.join(formats))
1134    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1135
1136    # Wait for ncap*nsurf images and ncap metadata responses.
1137    # Assume that captures come out in the same order as requested in
1138    # the burst, however individual images of different formats can come
1139    # out in any order for that capture.
1140    nbufs = 0
1141    mds = []
1142    physical_mds = []
1143    widths = None
1144    heights = None
1145    while nbufs < ncap * nsurf or len(mds) < ncap:
1146      json_obj, buf = self.__read_response_from_socket()
1147      if json_obj['tag'] in ItsSession.IMAGE_FORMAT_LIST_1 and buf is not None:
1148        fmt = json_obj['tag'][:-5]
1149        bufs[self._camera_id][fmt].append(buf)
1150        nbufs += 1
1151      elif json_obj['tag'] == 'yuvImage':
1152        buf_size = numpy.product(buf.shape)
1153        yuv_bufs[self._camera_id][buf_size].append(buf)
1154        nbufs += 1
1155      elif json_obj['tag'] == 'captureResults':
1156        mds.append(json_obj['objValue']['captureResult'])
1157        physical_mds.append(json_obj['objValue']['physicalResults'])
1158        outputs = json_obj['objValue']['outputs']
1159        widths = [out['width'] for out in outputs]
1160        heights = [out['height'] for out in outputs]
1161      else:
1162        tag_string = unicodedata.normalize('NFKD', json_obj['tag']).encode(
1163            'ascii', 'ignore')
1164        for x in ItsSession.IMAGE_FORMAT_LIST_2:
1165          x = bytes(x, encoding='utf-8')
1166          if tag_string.startswith(x):
1167            if x == b'yuvImage':
1168              physical_id = json_obj['tag'][len(x):]
1169              if physical_id in cam_ids:
1170                buf_size = numpy.product(buf.shape)
1171                yuv_bufs[physical_id][buf_size].append(buf)
1172                nbufs += 1
1173            else:
1174              physical_id = json_obj['tag'][len(x):]
1175              if physical_id in cam_ids:
1176                fmt = x[:-5].decode('UTF-8')
1177                bufs[physical_id][fmt].append(buf)
1178                nbufs += 1
1179    rets = []
1180    for j, fmt in enumerate(formats):
1181      objs = []
1182      if 'physicalCamera' in cmd['outputSurfaces'][j]:
1183        cam_id = cmd['outputSurfaces'][j]['physicalCamera']
1184      else:
1185        cam_id = self._camera_id
1186
1187      for i in range(ncap):
1188        obj = {}
1189        obj['width'] = widths[j]
1190        obj['height'] = heights[j]
1191        obj['format'] = fmt
1192        if cam_id == self._camera_id:
1193          obj['metadata'] = mds[i]
1194        else:
1195          for physical_md in physical_mds[i]:
1196            if cam_id in physical_md:
1197              obj['metadata'] = physical_md[cam_id]
1198              break
1199
1200        if fmt == 'yuv':
1201          buf_size = (widths[j] * heights[j] * 3) // 2
1202          obj['data'] = yuv_bufs[cam_id][buf_size][i]
1203        else:
1204          obj['data'] = bufs[cam_id][fmt][i]
1205        objs.append(obj)
1206      rets.append(objs if ncap > 1 else objs[0])
1207    self.sock.settimeout(self.SOCK_TIMEOUT)
1208    if len(rets) > 1 or (isinstance(rets[0], dict) and
1209                         isinstance(cap_request, list)):
1210      return rets
1211    else:
1212      return rets[0]
1213
1214  def do_vibrate(self, pattern):
1215    """Cause the device to vibrate to a specific pattern.
1216
1217    Args:
1218      pattern: Durations (ms) for which to turn on or off the vibrator.
1219      The first value indicates the number of milliseconds to wait
1220      before turning the vibrator on. The next value indicates the
1221      number of milliseconds for which to keep the vibrator on
1222      before turning it off. Subsequent values alternate between
1223      durations in milliseconds to turn the vibrator off or to turn
1224      the vibrator on.
1225
1226    Returns:
1227      Nothing.
1228    """
1229    cmd = {}
1230    cmd['cmdName'] = 'doVibrate'
1231    cmd['pattern'] = pattern
1232    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1233    data, _ = self.__read_response_from_socket()
1234    if data['tag'] != 'vibrationStarted':
1235      raise error_util.CameraItsError('Invalid response for command: %s' %
1236                                      cmd['cmdName'])
1237
1238  def set_audio_restriction(self, mode):
1239    """Set the audio restriction mode for this camera device.
1240
1241    Args:
1242     mode: int; the audio restriction mode. See CameraDevice.java for valid
1243     value.
1244    Returns:
1245     Nothing.
1246    """
1247    cmd = {}
1248    cmd['cmdName'] = 'setAudioRestriction'
1249    cmd['mode'] = mode
1250    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1251    data, _ = self.__read_response_from_socket()
1252    if data['tag'] != 'audioRestrictionSet':
1253      raise error_util.CameraItsError('Invalid response for command: %s' %
1254                                      cmd['cmdName'])
1255
  # pylint: disable=dangerous-default-value
  def do_3a(self,
            regions_ae=[[0, 0, 1, 1, 1]],
            regions_awb=[[0, 0, 1, 1, 1]],
            regions_af=[[0, 0, 1, 1, 1]],
            do_ae=True,
            do_awb=True,
            do_af=True,
            lock_ae=False,
            lock_awb=False,
            get_results=False,
            ev_comp=0,
            auto_flash=False,
            mono_camera=False):
    """Perform a 3A operation on the device.

    Triggers some or all of AE, AWB, and AF, and returns once they have
    converged. Uses the vendor 3A that is implemented inside the HAL.
    Note: do_awb is always enabled regardless of do_awb flag

    Throws an assertion if 3A fails to converge.

    Args:
      regions_ae: List of weighted AE regions.
      regions_awb: List of weighted AWB regions.
      regions_af: List of weighted AF regions.
      do_ae: Trigger AE and wait for it to converge.
      do_awb: Wait for AWB to converge.
      do_af: Trigger AF and wait for it to converge.
      lock_ae: Request AE lock after convergence, and wait for it.
      lock_awb: Request AWB lock after convergence, and wait for it.
      get_results: Return the 3A results from this function.
      ev_comp: An EV compensation value to use when running AE.
      auto_flash: AE control boolean to enable auto flash.
      mono_camera: Boolean for monochrome camera.

      Region format in args:
         Arguments are lists of weighted regions; each weighted region is a
         list of 5 values, [x, y, w, h, wgt], and each argument is a list of
         these 5-value lists. The coordinates are given as normalized
         rectangles (x, y, w, h) specifying the region. For example:
         [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
         Weights are non-negative integers.

    Returns:
      Five values are returned if get_results is true:
      * AE sensitivity; None if do_ae is False
      * AE exposure time; None if do_ae is False
      * AWB gains (list);
      * AWB transform (list);
      * AF focus position; None if do_af is false
      Otherwise, it returns five None values.
    """
    logging.debug('Running vendor 3A on device')
    cmd = {}
    cmd['cmdName'] = 'do3A'
    # Flatten each list of 5-value regions into one flat list of numbers
    # ([x, y, w, h, wgt] * n) for the JSON wire format.
    cmd['regions'] = {
        'ae': sum(regions_ae, []),
        'awb': sum(regions_awb, []),
        'af': sum(regions_af, [])
    }
    # Only AE and AF take explicit triggers; AWB always runs on-device.
    cmd['triggers'] = {'ae': do_ae, 'af': do_af}
    if lock_ae:
      cmd['aeLock'] = True
    if lock_awb:
      cmd['awbLock'] = True
    if ev_comp != 0:
      cmd['evComp'] = ev_comp
    if auto_flash:
      cmd['autoFlash'] = True
    if self._hidden_physical_id:
      cmd['physicalId'] = self._hidden_physical_id
    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())

    # Wait for each specified 3A to converge.
    ae_sens = None
    ae_exp = None
    awb_gains = None
    awb_transform = None
    af_dist = None
    converged = False
    # The service streams intermediate results; keep reading until '3aDone'.
    while True:
      data, _ = self.__read_response_from_socket()
      vals = data['strValue'].split()
      if data['tag'] == 'aeResult':
        if do_ae:
          ae_sens, ae_exp = [int(i) for i in vals]
      elif data['tag'] == 'afResult':
        if do_af:
          af_dist = float(vals[0])
      elif data['tag'] == 'awbResult':
        # First four values are the AWB gains, the rest the color transform.
        awb_gains = [float(f) for f in vals[:4]]
        awb_transform = [float(f) for f in vals[4:]]
      elif data['tag'] == '3aConverged':
        converged = True
      elif data['tag'] == '3aDone':
        break
      else:
        raise error_util.CameraItsError('Invalid command response')
    if converged and not get_results:
      return None, None, None, None, None
    # Mono cameras report no AWB gains, so that check is skipped for them.
    if (do_ae and ae_sens is None or
        (not mono_camera and do_awb and awb_gains is None) or
        do_af and af_dist is None or not converged):
      raise error_util.CameraItsError('3A failed to converge')
    return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
1362
1363  def calc_camera_fov(self, props):
1364    """Determine the camera field of view from internal params.
1365
1366    Args:
1367      props: Camera properties object.
1368
1369    Returns:
1370      camera_fov: string; field of view for camera.
1371    """
1372
1373    focal_ls = props['android.lens.info.availableFocalLengths']
1374    if len(focal_ls) > 1:
1375      logging.debug('Doing capture to determine logical camera focal length')
1376      cap = self.do_capture(capture_request_utils.auto_capture_request())
1377      focal_l = cap['metadata']['android.lens.focalLength']
1378    else:
1379      focal_l = focal_ls[0]
1380
1381    sensor_size = props['android.sensor.info.physicalSize']
1382    diag = math.sqrt(sensor_size['height']**2 + sensor_size['width']**2)
1383    try:
1384      fov = str(round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2))
1385    except ValueError:
1386      fov = str(0)
1387    logging.debug('Calculated FoV: %s', fov)
1388    return fov
1389
1390  def get_file_name_to_load(self, chart_distance, camera_fov, scene):
1391    """Get the image to load on the tablet depending on fov and chart_distance.
1392
1393    Args:
1394     chart_distance: float; distance in cm from camera of displayed chart
1395     camera_fov: float; camera field of view.
1396     scene: String; Scene to be used in the test.
1397
1398    Returns:
1399     file_name: file name to display on the tablet.
1400
1401    """
1402    chart_scaling = opencv_processing_utils.calc_chart_scaling(
1403        chart_distance, camera_fov)
1404    if numpy.isclose(
1405        chart_scaling,
1406        opencv_processing_utils.SCALE_RFOV_IN_WFOV_BOX,
1407        atol=0.01):
1408      file_name = '%s_%sx_scaled.png' % (
1409          scene, str(opencv_processing_utils.SCALE_RFOV_IN_WFOV_BOX))
1410    elif numpy.isclose(
1411        chart_scaling,
1412        opencv_processing_utils.SCALE_TELE_IN_WFOV_BOX,
1413        atol=0.01):
1414      file_name = '%s_%sx_scaled.png' % (
1415          scene, str(opencv_processing_utils.SCALE_TELE_IN_WFOV_BOX))
1416    elif numpy.isclose(
1417        chart_scaling,
1418        opencv_processing_utils.SCALE_TELE25_IN_RFOV_BOX,
1419        atol=0.01):
1420      file_name = '%s_%sx_scaled.png' % (
1421          scene, str(opencv_processing_utils.SCALE_TELE25_IN_RFOV_BOX))
1422    elif numpy.isclose(
1423        chart_scaling,
1424        opencv_processing_utils.SCALE_TELE40_IN_RFOV_BOX,
1425        atol=0.01):
1426      file_name = '%s_%sx_scaled.png' % (
1427          scene, str(opencv_processing_utils.SCALE_TELE40_IN_RFOV_BOX))
1428    elif numpy.isclose(
1429        chart_scaling,
1430        opencv_processing_utils.SCALE_TELE_IN_RFOV_BOX,
1431        atol=0.01):
1432      file_name = '%s_%sx_scaled.png' % (
1433          scene, str(opencv_processing_utils.SCALE_TELE_IN_RFOV_BOX))
1434    else:
1435      file_name = '%s.png' % scene
1436    logging.debug('Scene to load: %s', file_name)
1437    return file_name
1438
1439  def is_stream_combination_supported(self, out_surfaces):
1440    """Query whether out_surfaces combination is supported by the camera device.
1441
1442    This function hooks up to the isSessionConfigurationSupported() camera API
1443    to query whether a particular stream combination is supported.
1444
1445    Args:
1446      out_surfaces: dict; see do_capture() for specifications on out_surfaces
1447
1448    Returns:
1449      Boolean
1450    """
1451    cmd = {}
1452    cmd['cmdName'] = 'isStreamCombinationSupported'
1453
1454    if not isinstance(out_surfaces, list):
1455      cmd['outputSurfaces'] = [out_surfaces]
1456    else:
1457      cmd['outputSurfaces'] = out_surfaces
1458    formats = [c['format'] if 'format' in c else 'yuv'
1459               for c in cmd['outputSurfaces']]
1460    formats = [s if s != 'jpg' else 'jpeg' for s in formats]
1461
1462    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1463
1464    data, _ = self.__read_response_from_socket()
1465    if data['tag'] != 'streamCombinationSupport':
1466      raise error_util.CameraItsError('Failed to query stream combination')
1467
1468    return data['strValue'] == 'supportedCombination'
1469
1470  def is_camera_privacy_mode_supported(self):
1471    """Query whether the mobile device supports camera privacy mode.
1472
1473    This function checks whether the mobile device has FEATURE_CAMERA_TOGGLE
1474    feature support, which indicates the camera device can run in privacy mode.
1475
1476    Returns:
1477      Boolean
1478    """
1479    cmd = {}
1480    cmd['cmdName'] = 'isCameraPrivacyModeSupported'
1481    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1482
1483    data, _ = self.__read_response_from_socket()
1484    if data['tag'] != 'cameraPrivacyModeSupport':
1485      raise error_util.CameraItsError('Failed to query camera privacy mode'
1486                                      ' support')
1487    return data['strValue'] == 'true'
1488
1489  def is_primary_camera(self):
1490    """Query whether the camera device is a primary rear/front camera.
1491
1492    A primary rear/front facing camera is a camera device with the lowest
1493    camera Id for that facing.
1494
1495    Returns:
1496      Boolean
1497    """
1498    cmd = {}
1499    cmd['cmdName'] = 'isPrimaryCamera'
1500    cmd['cameraId'] = self._camera_id
1501    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1502
1503    data, _ = self.__read_response_from_socket()
1504    if data['tag'] != 'primaryCamera':
1505      raise error_util.CameraItsError('Failed to query primary camera')
1506    return data['strValue'] == 'true'
1507
1508  def is_performance_class(self):
1509    """Query whether the mobile device is an R or S performance class device.
1510
1511    Returns:
1512      Boolean
1513    """
1514    cmd = {}
1515    cmd['cmdName'] = 'isPerformanceClass'
1516    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1517
1518    data, _ = self.__read_response_from_socket()
1519    if data['tag'] != 'performanceClass':
1520      raise error_util.CameraItsError('Failed to query performance class')
1521    return data['strValue'] == 'true'
1522
1523  def measure_camera_launch_ms(self):
1524    """Measure camera launch latency in millisecond, from open to first frame.
1525
1526    Returns:
1527      Camera launch latency from camera open to receipt of first frame
1528    """
1529    cmd = {}
1530    cmd['cmdName'] = 'measureCameraLaunchMs'
1531    cmd['cameraId'] = self._camera_id
1532    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1533
1534    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
1535    self.sock.settimeout(timeout)
1536    data, _ = self.__read_response_from_socket()
1537    self.sock.settimeout(self.SOCK_TIMEOUT)
1538
1539    if data['tag'] != 'cameraLaunchMs':
1540      raise error_util.CameraItsError('Failed to measure camera launch latency')
1541    return float(data['strValue'])
1542
1543  def measure_camera_1080p_jpeg_capture_ms(self):
1544    """Measure camera 1080P jpeg capture latency in milliseconds.
1545
1546    Returns:
1547      Camera jpeg capture latency in milliseconds
1548    """
1549    cmd = {}
1550    cmd['cmdName'] = 'measureCamera1080pJpegCaptureMs'
1551    cmd['cameraId'] = self._camera_id
1552    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1553
1554    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
1555    self.sock.settimeout(timeout)
1556    data, _ = self.__read_response_from_socket()
1557    self.sock.settimeout(self.SOCK_TIMEOUT)
1558
1559    if data['tag'] != 'camera1080pJpegCaptureMs':
1560      raise error_util.CameraItsError(
1561          'Failed to measure camera 1080p jpeg capture latency')
1562    return float(data['strValue'])
1563
1564
# Created once at module level so CameraIdCombo instances from different
# parse_camera_ids() calls share a single type (the original recreated the
# namedtuple class on every call).
_CAMERA_ID_COMBO = collections.namedtuple('CameraIdCombo', ['id', 'sub_id'])


def parse_camera_ids(ids):
  """Parse the string of camera IDs into array of CameraIdCombo tuples.

  Args:
   ids: List of camera id strings, each either 'ID' or
     f'ID{SUB_CAMERA_SEPARATOR}SUB_ID'.

  Returns:
   Array of CameraIdCombo; sub_id is None when the id has no sub-camera part.

  Raises:
    AssertionError: if an id contains more than one separator.
  """
  id_combos = []
  for one_id in ids:
    one_combo = one_id.split(SUB_CAMERA_SEPARATOR)
    if len(one_combo) == 1:
      id_combos.append(_CAMERA_ID_COMBO(one_combo[0], None))
    elif len(one_combo) == 2:
      id_combos.append(_CAMERA_ID_COMBO(one_combo[0], one_combo[1]))
    else:
      raise AssertionError('Camera id parameters must be either ID or '
                           f'ID{SUB_CAMERA_SEPARATOR}SUB_ID')
  return id_combos
1586
1587
1588def _run(cmd):
1589  """Replacement for os.system, with hiding of stdout+stderr messages.
1590
1591  Args:
1592    cmd: Command to be executed in string format.
1593  """
1594  with open(os.devnull, 'wb') as devnull:
1595    subprocess.check_call(cmd.split(), stdout=devnull, stderr=subprocess.STDOUT)
1596
1597
def do_capture_with_latency(cam, req, sync_latency, fmt=None):
  """Helper function to take enough frames to allow sync latency.

  Args:
    cam: camera object
    req: request for camera
    sync_latency: integer number of frames
    fmt: format for the capture
  Returns:
    single capture with the unsettled frames discarded
  """
  # Capture sync_latency+1 frames; only the final frame is settled.
  num_frames = sync_latency + 1
  captures = cam.do_capture([req] * num_frames, fmt)
  return captures[-1]
1611
1612
def load_scene(cam, props, scene, tablet, chart_distance, lighting_check=True):
  """Load the scene for the camera based on the FOV.

  Args:
    cam: camera object
    props: camera properties
    scene: scene to be loaded
    tablet: tablet to load scene on
    chart_distance: distance to tablet
    lighting_check: Boolean for lighting check enabled
  """
  if not tablet:
    logging.info('Manual run: no tablet to load scene on.')
    return
  # The camera FOV determines which image file is displayed on the tablet.
  camera_fov = cam.calc_camera_fov(props)
  file_name = cam.get_file_name_to_load(chart_distance, camera_fov, scene)
  logging.debug('Displaying %s on the tablet', file_name)
  tablet.adb.shell(
      'am start -a android.intent.action.VIEW -t image/png '
      f'-d file://mnt/sdcard/Download/{file_name}')
  # Allow the tablet's image viewer time to render the scene.
  time.sleep(LOAD_SCENE_DELAY_SEC)
  fov = float(camera_fov)
  rfov_camera_in_rfov_box = (
      numpy.isclose(chart_distance,
                    opencv_processing_utils.CHART_DISTANCE_RFOV,
                    rtol=0.1) and
      opencv_processing_utils.FOV_THRESH_TELE <= fov
      <= opencv_processing_utils.FOV_THRESH_WFOV)
  wfov_camera_in_wfov_box = (
      numpy.isclose(chart_distance,
                    opencv_processing_utils.CHART_DISTANCE_WFOV,
                    rtol=0.1) and
      fov > opencv_processing_utils.FOV_THRESH_WFOV)
  # Only check lighting when the camera/box combination matches the rig.
  if lighting_check and (rfov_camera_in_rfov_box or wfov_camera_in_wfov_box):
    cam.do_3a()
    cap = cam.do_capture(
        capture_request_utils.auto_capture_request(), cam.CAP_YUV)
    y_plane, _, _ = image_processing_utils.convert_capture_to_planes(cap)
    validate_lighting(y_plane, scene)
1653
1654
def validate_lighting(y_plane, scene, state='ON'):
  """Validates the lighting level in scene corners based on empirical values.

  Args:
    y_plane: Y plane of YUV image
    scene: scene name
    state: string 'ON' or 'OFF'

  Returns:
    boolean True if lighting validated, else raise AssertionError
  """
  logging.debug('Validating lighting levels.')

  # Sample a small patch from each corner region.
  # NOTE(review): the body always returns or raises on the first iteration,
  # so only the first region in _VALIDATE_LIGHTING_REGIONS is examined —
  # confirm whether checking all corners was intended.
  for location, coordinates in _VALIDATE_LIGHTING_REGIONS.items():
    patch = image_processing_utils.get_image_patch(
        y_plane, coordinates[0], coordinates[1],
        _VALIDATE_LIGHTING_PATCH_W, _VALIDATE_LIGHTING_PATCH_H)
    y_mean = image_processing_utils.compute_image_means(patch)[0]
    logging.debug('%s corner Y mean: %.3f', location, y_mean)
    if state == 'ON':
      if y_mean <= _VALIDATE_LIGHTING_THRESH:
        # Save the offending frame for debugging before failing.
        image_processing_utils.write_image(
            y_plane, f'validate_lighting_{scene}.jpg')
        raise AssertionError('Lights OFF in test rig. Turn ON and retry.')
      logging.debug('Lights ON in test rig.')
      return True
    if state == 'OFF':
      if y_mean >= _VALIDATE_LIGHTING_THRESH:
        image_processing_utils.write_image(
            y_plane, f'validate_lighting_{scene}.jpg')
        raise AssertionError('Lights ON in test rig. Turn OFF and retry.')
      logging.debug('Lights OFF in test rig.')
      return True
    raise AssertionError('Invalid lighting state string. '
                         "Valid strings: 'ON', 'OFF'.")
1694
1695
def get_build_sdk_version(device_id):
  """Return the int build version of the device.

  Args:
    device_id: serial number of the device under test.

  Returns:
    int value of the device's ro.build.version.sdk property.

  Raises:
    AssertionError: if the property cannot be read or parsed as an int.
  """
  # f-string for consistency with the rest of the file.
  cmd = f'adb -s {device_id} shell getprop ro.build.version.sdk'
  try:
    build_sdk_version = int(subprocess.check_output(cmd.split()).rstrip())
    logging.debug('Build SDK version: %d', build_sdk_version)
  except (subprocess.CalledProcessError, ValueError) as exp_errors:
    raise AssertionError('No build_sdk_version.') from exp_errors
  return build_sdk_version
1705
1706
def get_first_api_level(device_id):
  """Return the int value for the first API level of the device.

  Falls back to the build SDK version when ro.product.first_api_level
  cannot be read or parsed.

  Args:
    device_id: serial number of the device under test.

  Returns:
    int first API level (or build SDK version as fallback).
  """
  # f-string for consistency with the rest of the file.
  cmd = f'adb -s {device_id} shell getprop ro.product.first_api_level'
  try:
    first_api_level = int(subprocess.check_output(cmd.split()).rstrip())
    logging.debug('First API level: %d', first_api_level)
  except (subprocess.CalledProcessError, ValueError):
    logging.error('No first_api_level. Setting to build version.')
    first_api_level = get_build_sdk_version(device_id)
  return first_api_level
1717
1718
def get_vendor_api_level(device_id):
  """Return the int value for the vendor API level of the device.

  Falls back to the build SDK version when ro.vendor.api_level cannot be
  read or parsed.

  Args:
    device_id: serial number of the device under test.

  Returns:
    int vendor API level (or build SDK version as fallback).
  """
  # f-string for consistency with the rest of the file.
  cmd = f'adb -s {device_id} shell getprop ro.vendor.api_level'
  try:
    vendor_api_level = int(subprocess.check_output(cmd.split()).rstrip())
    # Fixed copy-paste in log message: this is the vendor API level,
    # not the first API level.
    logging.debug('Vendor API level: %d', vendor_api_level)
  except (subprocess.CalledProcessError, ValueError):
    logging.error('No vendor_api_level. Setting to build version.')
    vendor_api_level = get_build_sdk_version(device_id)
  return vendor_api_level
1729
1730
class ItsSessionUtilsTests(unittest.TestCase):
  """Run a suite of unit tests on this module."""

  _BRIGHTNESS_CHECKS = (0.0,
                        _VALIDATE_LIGHTING_THRESH-0.01,
                        _VALIDATE_LIGHTING_THRESH,
                        _VALIDATE_LIGHTING_THRESH+0.01,
                        1.0)
  _TEST_IMG_W = 640
  _TEST_IMG_H = 480

  def _generate_test_image(self, brightness):
    """Creates a Y plane array with pixel values of brightness.

    Args:
      brightness: float between [0.0, 1.0]

    Returns:
      Y plane array with elements of value brightness
    """
    # numpy.full builds the constant-valued plane in a single call.
    return numpy.full((self._TEST_IMG_W, self._TEST_IMG_H, 1), brightness,
                      dtype=float)

  def test_validate_lighting(self):
    """Tests validate_lighting() works correctly."""
    # Sweep brightness values around the threshold to validate both sides.
    for brightness in self._BRIGHTNESS_CHECKS:
      logging.debug('Testing validate_lighting with brightness %.1f',
                    brightness)
      y_plane = self._generate_test_image(brightness)
      print(f'Testing brightness: {brightness}')
      if brightness > _VALIDATE_LIGHTING_THRESH:
        self.assertTrue(validate_lighting(y_plane, 'unittest'),
                        f'image value {brightness} should PASS')
      else:
        self.assertRaises(
            AssertionError, validate_lighting, y_plane, 'unittest')
1770
1771
# Run this module's unit tests when executed directly.
if __name__ == '__main__':
  unittest.main()
1774