# Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import socket
import string
import subprocess
import sys
import time
import unicodedata
import unittest

import its.caps
import its.error
import its.objects
import numpy

from collections import namedtuple


class ItsSession(object):
    """Controls a device over adb to run ITS scripts.

    The script importing this module (on the host machine) prepares JSON
    objects encoding CaptureRequests, specifying sets of parameters to use
    when capturing an image using the Camera2 APIs. This class encapsulates
    sending the requests to the device, monitoring the device's progress, and
    copying the resultant captures back to the host machine when done. TCP
    forwarded over adb is the transport mechanism used.

    The device must have CtsVerifier.apk installed.

    Attributes:
        sock: The open socket.
    """

    # Open a connection to localhost:<host_port>, forwarded to port 6000 on the
    # device. <host_port> is determined at run-time to support multiple
    # connected devices.
    IPADDR = '127.0.0.1'
    REMOTE_PORT = 6000
    BUFFER_SIZE = 4096

    # LOCK_PORT is used as a mutex lock to protect the list of forwarded ports
    # among all processes. The script assumes LOCK_PORT is available and will
    # try to use ports between CLIENT_PORT_START and
    # CLIENT_PORT_START+MAX_NUM_PORTS-1 on the host for ITS sessions.
    CLIENT_PORT_START = 6000
    MAX_NUM_PORTS = 100
    LOCK_PORT = CLIENT_PORT_START + MAX_NUM_PORTS

    # Seconds timeout on each socket operation.
    SOCK_TIMEOUT = 20.0
    # Additional timeout in seconds when the ITS service is doing more
    # complicated operations, for example issuing warmup requests before the
    # actual capture.
    EXTRA_SOCK_TIMEOUT = 5.0

    SEC_TO_NSEC = 1000*1000*1000.0

    PACKAGE = 'com.android.cts.verifier.camera.its'
    INTENT_START = 'com.android.cts.verifier.camera.its.START'
    ACTION_ITS_RESULT = 'com.android.cts.verifier.camera.its.ACTION_ITS_RESULT'
    EXTRA_VERSION = 'camera.its.extra.VERSION'
    CURRENT_ITS_VERSION = '1.0'  # version number to sync with CtsVerifier
    EXTRA_CAMERA_ID = 'camera.its.extra.CAMERA_ID'
    EXTRA_RESULTS = 'camera.its.extra.RESULTS'
    ITS_TEST_ACTIVITY = 'com.android.cts.verifier/.camera.its.ItsTestActivity'

    # This string must be in sync with ItsService. Updated when the interface
    # between the script and ItsService is changed.
    ITS_SERVICE_VERSION = "1.0"

    RESULT_PASS = 'PASS'
    RESULT_FAIL = 'FAIL'
    RESULT_NOT_EXECUTED = 'NOT_EXECUTED'
    RESULT_VALUES = {RESULT_PASS, RESULT_FAIL, RESULT_NOT_EXECUTED}
    RESULT_KEY = 'result'
    SUMMARY_KEY = 'summary'
    START_TIME_KEY = 'start'
    END_TIME_KEY = 'end'

    adb = "adb -d"
    device_id = ""

    CAMERA_ID_TOKENIZER = '.'
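
    # A minimal usage sketch (illustrative, not part of the original module);
    # it assumes this file is importable as its.device and that a device with
    # CtsVerifier.apk installed is connected:
    #
    #     import its.device
    #     with its.device.ItsSession() as cam:
    #         props = cam.get_camera_properties()
    #         req = {"android.sensor.exposureTime": 100*1000*1000,
    #                "android.sensor.sensitivity": 100}
    #         cap = cam.do_capture(req, cam.CAP_YUV)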

    # Definitions for some of the common output format options for do_capture().
    # Each gets images of full resolution for each requested format.
    CAP_RAW = {"format":"raw"}
    CAP_DNG = {"format":"dng"}
    CAP_YUV = {"format":"yuv"}
    CAP_JPEG = {"format":"jpeg"}
    CAP_RAW_YUV = [{"format":"raw"}, {"format":"yuv"}]
    CAP_DNG_YUV = [{"format":"dng"}, {"format":"yuv"}]
    CAP_RAW_JPEG = [{"format":"raw"}, {"format":"jpeg"}]
    CAP_DNG_JPEG = [{"format":"dng"}, {"format":"jpeg"}]
    CAP_YUV_JPEG = [{"format":"yuv"}, {"format":"jpeg"}]
    CAP_RAW_YUV_JPEG = [{"format":"raw"}, {"format":"yuv"}, {"format":"jpeg"}]
    CAP_DNG_YUV_JPEG = [{"format":"dng"}, {"format":"yuv"}, {"format":"jpeg"}]

    # Predefine camera props. Save props extracted from the function
    # "get_camera_properties".
    props = None

    # Initialize the socket port for the host to forward requests to the device.
    # This method assumes localhost's LOCK_PORT is available and will try to
    # use ports between CLIENT_PORT_START and CLIENT_PORT_START+MAX_NUM_PORTS-1.
    def __init_socket_port(self):
        NUM_RETRIES = 100
        RETRY_WAIT_TIME_SEC = 0.05

        # Bind a socket to use as a mutex lock.
        socket_lock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        for i in range(NUM_RETRIES):
            try:
                socket_lock.bind((ItsSession.IPADDR, ItsSession.LOCK_PORT))
                break
            except (socket.error, socket.timeout):
                if i == NUM_RETRIES - 1:
                    raise its.error.Error(self.device_id,
                                          "socket lock returns error")
                else:
                    time.sleep(RETRY_WAIT_TIME_SEC)

        # Check if a port is already assigned to the device.
        command = "adb forward --list"
        proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
        output, error = proc.communicate()

        port = None
        used_ports = []
        for line in output.split(os.linesep):
            # Each line should be formatted as:
            # "<device_id> tcp:<host_port> tcp:<remote_port>"
            forward_info = line.split()
            if len(forward_info) >= 3 and \
                    len(forward_info[1]) > 4 and forward_info[1][:4] == "tcp:" and \
                    len(forward_info[2]) > 4 and forward_info[2][:4] == "tcp:":
                local_p = int(forward_info[1][4:])
                remote_p = int(forward_info[2][4:])
                if forward_info[0] == self.device_id and \
                        remote_p == ItsSession.REMOTE_PORT:
                    port = local_p
                    break
                else:
                    used_ports.append(local_p)

        # Find the first available port if no port is assigned to the device.
        if port is None:
            for p in range(ItsSession.CLIENT_PORT_START,
                           ItsSession.CLIENT_PORT_START +
                           ItsSession.MAX_NUM_PORTS):
                if p not in used_ports:
                    # Try to run "adb forward" with the port.
                    command = "%s forward tcp:%d tcp:%d" % \
                              (self.adb, p, self.REMOTE_PORT)
                    proc = subprocess.Popen(command.split(),
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE)
                    output, error = proc.communicate()

                    # Check if there is no error.
                    if error is None or error.find("error") < 0:
                        port = p
                        break

        if port is None:
            raise its.error.Error(self.device_id,
                                  " cannot find an available port")

        # Release the socket as mutex unlock.
        socket_lock.close()

        # Connect to the socket.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.IPADDR, port))
        self.sock.settimeout(self.SOCK_TIMEOUT)

    # Reboot the device if needed and wait for the service to be ready for
    # connection.
    def __wait_for_service(self):
        # This also includes the optional reboot handling: if the user
        # provides a "reboot" or "reboot=N" arg, then reboot the device,
        # waiting for N seconds (default 30) before returning.
        for s in sys.argv[1:]:
            if s[:6] == "reboot":
                duration = 30
                if len(s) > 7 and s[6] == "=":
                    duration = int(s[7:])
                print "Rebooting device"
                _run("%s reboot" % (self.adb))
                _run("%s wait-for-device" % (self.adb))
                time.sleep(duration)
                print "Reboot complete"

        # Flush logcat so the following code won't be misled by a previous
        # 'ItsService ready' log.
        _run('%s logcat -c' % (self.adb))
        time.sleep(1)

        # TODO: Figure out why "--user 0" is needed, and fix the problem.
        _run('%s shell am force-stop --user 0 %s' % (self.adb, self.PACKAGE))
        _run(('%s shell am start-foreground-service --user 0 -t text/plain '
              '-a %s') % (self.adb, self.INTENT_START))

        # Wait until the socket is ready to accept a connection.
        proc = subprocess.Popen(
            self.adb.split() + ["logcat"],
            stdout=subprocess.PIPE)
        logcat = proc.stdout
        while True:
            line = logcat.readline().strip()
            if line.find('ItsService ready') >= 0:
                break
        proc.kill()

    def __init__(self, camera_id=None, hidden_physical_id=None):
        self._camera_id = camera_id
        self._hidden_physical_id = hidden_physical_id

    def __enter__(self):
        # Initialize device id and adb command.
        self.device_id = get_device_id()
        self.adb = "adb -s " + self.device_id

        self.__wait_for_service()
        self.__init_socket_port()

        self.__close_camera()
        self.__open_camera()
        return self

    def __exit__(self, type, value, traceback):
        if hasattr(self, 'sock') and self.sock:
            self.__close_camera()
            self.sock.close()
        return False

    def __read_response_from_socket(self):
        # Read a line (newline-terminated) string serialization of JSON object.
        chars = []
        while len(chars) == 0 or chars[-1] != '\n':
            ch = self.sock.recv(1)
            if len(ch) == 0:
                # Socket was probably closed; otherwise don't get empty strings
                raise its.error.SocketError(self.device_id,
                                            'Problem with socket on device side')
            chars.append(ch)
        line = ''.join(chars)
        jobj = json.loads(line)
        # Optionally read a binary buffer of a fixed size.
        buf = None
        if jobj.has_key("bufValueSize"):
            n = jobj["bufValueSize"]
            buf = bytearray(n)
            view = memoryview(buf)
            while n > 0:
                nbytes = self.sock.recv_into(view, n)
                view = view[nbytes:]
                n -= nbytes
            buf = numpy.frombuffer(buf, dtype=numpy.uint8)
        return jobj, buf
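
    # The response framing handled above, as a sketch: one newline-terminated
    # JSON line, optionally followed by a raw binary payload whose length is
    # given by "bufValueSize", e.g.
    #
    #     {"tag": "jpegImage", "bufValueSize": 123456}\n<123456 bytes>
    #
    # The tag and size shown here are illustrative values only.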

    def __open_camera(self):
        # Get the camera ID to open if it is passed as an argument for a
        # single camera. This allows passing camera=# to individual tests at
        # the command line, and camera=#,#,# or no camera argv with
        # tools/run_all_tests.py.
        #
        # In case the camera is a logical multi-camera, to run ITS on the
        # hidden physical sub-camera, pass camera=[logical ID]:[physical ID]
        # to an individual test at the command line; the same applies to
        # multiple camera IDs for tools/run_all_tests.py: camera=#,#:#,#:#,#
        if not self._camera_id:
            self._camera_id = 0
            for s in sys.argv[1:]:
                if s[:7] == "camera=" and len(s) > 7:
                    camera_ids = s[7:].split(',')
                    camera_id_combos = parse_camera_ids(camera_ids)
                    if len(camera_id_combos) == 1:
                        self._camera_id = camera_id_combos[0].id
                        self._hidden_physical_id = camera_id_combos[0].sub_id

        cmd = {"cmdName":"open", "cameraId":self._camera_id}
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraOpened':
            raise its.error.Error('Invalid command response')

    def __close_camera(self):
        cmd = {"cmdName":"close"}
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraClosed':
            raise its.error.Error('Invalid command response')

    def do_vibrate(self, pattern):
        """Cause the device to vibrate to a specific pattern.

        Args:
            pattern: Durations (ms) for which to turn on or off the vibrator.
                The first value indicates the number of milliseconds to wait
                before turning the vibrator on. The next value indicates the
                number of milliseconds for which to keep the vibrator on
                before turning it off. Subsequent values alternate between
                durations in milliseconds to turn the vibrator off or to turn
                the vibrator on.

        Returns:
            Nothing.
        """
        cmd = {}
        cmd["cmdName"] = "doVibrate"
        cmd["pattern"] = pattern
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'vibrationStarted':
            raise its.error.Error('Invalid command response')

    def set_audio_restriction(self, mode):
        """Set the audio restriction mode for this camera device.

        Args:
            mode: the audio restriction mode. See CameraDevice.java for valid
                values.
        Returns:
            Nothing.
        """
        cmd = {}
        cmd["cmdName"] = "setAudioRestriction"
        cmd["mode"] = mode
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data["tag"] != "audioRestrictionSet":
            raise its.error.Error("Invalid command response")

    def get_sensors(self):
        """Get all sensors on the device.

        Returns:
            A Python dictionary mapping each sensor name to a boolean
            indicating whether the sensor exists on the device.
        """
        cmd = {}
        cmd["cmdName"] = "checkSensorExistence"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'sensorExistence':
            raise its.error.Error('Invalid command response')
        return data['objValue']

    def start_sensor_events(self):
        """Start collecting sensor events on the device.

        See get_sensor_events for more info.

        Returns:
            Nothing.
        """
        cmd = {}
        cmd["cmdName"] = "startSensorEvents"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'sensorEventsStarted':
            raise its.error.Error('Invalid command response')

    def get_sensor_events(self):
        """Get a trace of all sensor events on the device.

        The trace starts when the start_sensor_events function is called. If
        the test runs for a long time after this call, then the device's
        internal memory can fill up. Calling get_sensor_events gets all events
        from the device, and then stops the device from collecting events and
        clears the internal buffer; to start again, the start_sensor_events
        call must be used again.

        Events from the accelerometer, compass, and gyro are returned; each
        has a timestamp and x,y,z values.

        Note that sensor events are only produced if the device isn't in its
        standby mode (i.e. if the screen is on).

        Returns:
            A Python dictionary with three keys ("accel", "mag", "gyro") each
            of which maps to a list of objects containing "time","x","y","z"
            keys.
        """
        cmd = {}
        cmd["cmdName"] = "getSensorEvents"
        self.sock.send(json.dumps(cmd) + "\n")
        timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
        self.sock.settimeout(timeout)
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'sensorEvents':
            raise its.error.Error('Invalid command response')
        self.sock.settimeout(self.SOCK_TIMEOUT)
        return data['objValue']

    def get_camera_ids(self):
        """Get a list of camera device IDs that can be opened.

        Returns:
            A list of camera ID strings.
        """
        cmd = {}
        cmd["cmdName"] = "getCameraIds"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraIds':
            raise its.error.Error('Invalid command response')
        return data['objValue']['cameraIdArray']

    def check_its_version_compatible(self):
        """Check that the Java-side ItsService is compatible with this host script.

        Raises its.error.Error if the versions are incompatible.

        Returns: None
        """
        cmd = {}
        cmd["cmdName"] = "getItsVersion"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'ItsVersion':
            raise its.error.Error('ItsService is incompatible with host python script')
        server_version = data['strValue']
        if self.ITS_SERVICE_VERSION != server_version:
            raise its.error.Error('Version mismatch ItsService(%s) vs host script(%s)' % (
                server_version, self.ITS_SERVICE_VERSION))

    def override_with_hidden_physical_camera_props(self, props):
        """If the current session is for a hidden physical camera, check that it
        is a valid sub-camera backing the logical camera, and return the
        characteristics of the sub-camera. Otherwise, return "props" directly.

        Returns: The properties of the hidden physical camera if possible.
        """
        if self._hidden_physical_id:
            e_msg = 'Camera %s is not a logical multi-camera' % self._camera_id
            assert its.caps.logical_multi_camera(props), e_msg
            physical_ids = its.caps.logical_multi_camera_physical_ids(props)
            e_msg = 'Camera %s is not a hidden sub-camera of camera %s' % (
                self._hidden_physical_id, self._camera_id)
            assert self._hidden_physical_id in physical_ids, e_msg
            props = self.get_camera_properties_by_id(self._hidden_physical_id)
        return props
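
    # Illustrative sketch of a hidden physical sub-camera session; the IDs
    # "0" (logical) and "2" (physical) are placeholders, not values defined by
    # this module:
    #
    #     with its.device.ItsSession("0", hidden_physical_id="2") as cam:
    #         props = cam.get_camera_properties()
    #         props = cam.override_with_hidden_physical_camera_props(props)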

    def get_camera_properties(self):
        """Get the camera properties object for the device.

        Returns:
            The Python dictionary object for the CameraProperties object.
        """
        cmd = {}
        cmd["cmdName"] = "getCameraProperties"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraProperties':
            raise its.error.Error('Invalid command response')
        self.props = data['objValue']['cameraProperties']
        return data['objValue']['cameraProperties']

    def get_camera_properties_by_id(self, camera_id):
        """Get the camera properties object for the device with camera_id.

        Args:
            camera_id: The ID string of the camera.

        Returns:
            The Python dictionary object for the CameraProperties object. Empty
            if no such device exists.
        """
        cmd = {}
        cmd["cmdName"] = "getCameraPropertiesById"
        cmd["cameraId"] = camera_id
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraProperties':
            raise its.error.Error('Invalid command response')
        return data['objValue']['cameraProperties']

    def do_3a(self, regions_ae=[[0,0,1,1,1]],
                    regions_awb=[[0,0,1,1,1]],
                    regions_af=[[0,0,1,1,1]],
                    do_ae=True, do_awb=True, do_af=True,
                    lock_ae=False, lock_awb=False,
                    get_results=False,
                    ev_comp=0, mono_camera=False):
        """Perform a 3A operation on the device.

        Triggers some or all of AE, AWB, and AF, and returns once they have
        converged. Uses the vendor 3A that is implemented inside the HAL.
        Note: AWB is always performed, regardless of the do_awb flag.

        Throws an assertion if 3A fails to converge.

        Args:
            regions_ae: List of weighted AE regions.
            regions_awb: List of weighted AWB regions.
            regions_af: List of weighted AF regions.
            do_ae: Trigger AE and wait for it to converge.
            do_awb: Wait for AWB to converge.
            do_af: Trigger AF and wait for it to converge.
            lock_ae: Request AE lock after convergence, and wait for it.
            lock_awb: Request AWB lock after convergence, and wait for it.
            get_results: Return the 3A results from this function.
            ev_comp: An EV compensation value to use when running AE.
            mono_camera: Boolean for monochrome camera.

        Region format in args:
            Arguments are lists of weighted regions; each weighted region is a
            list of 5 values, [x, y, w, h, wgt], and each argument is a list of
            these 5-value lists. The coordinates are given as normalized
            rectangles (x, y, w, h) specifying the region. For example:
                [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
            Weights are non-negative integers.

        Returns:
            Five values are returned if get_results is true:
            * AE sensitivity; None if do_ae is False
            * AE exposure time; None if do_ae is False
            * AWB gains (list);
            * AWB transform (list);
            * AF focus position; None if do_af is false
            Otherwise, it returns five None values.
        """
        print "Running vendor 3A on device"
        cmd = {}
        cmd["cmdName"] = "do3A"
        cmd["regions"] = {"ae": sum(regions_ae, []),
                          "awb": sum(regions_awb, []),
                          "af": sum(regions_af, [])}
        cmd["triggers"] = {"ae": do_ae, "af": do_af}
        if lock_ae:
            cmd["aeLock"] = True
        if lock_awb:
            cmd["awbLock"] = True
        if ev_comp != 0:
            cmd["evComp"] = ev_comp
        if self._hidden_physical_id:
            cmd["physicalId"] = self._hidden_physical_id
        self.sock.send(json.dumps(cmd) + "\n")

        # Wait for each specified 3A to converge.
        ae_sens = None
        ae_exp = None
        awb_gains = None
        awb_transform = None
        af_dist = None
        converged = False
        while True:
            data,_ = self.__read_response_from_socket()
            vals = data['strValue'].split()
            if data['tag'] == 'aeResult':
                if do_ae:
                    ae_sens, ae_exp = [int(i) for i in vals]
            elif data['tag'] == 'afResult':
                if do_af:
                    af_dist = float(vals[0])
            elif data['tag'] == 'awbResult':
                awb_gains = [float(f) for f in vals[:4]]
                awb_transform = [float(f) for f in vals[4:]]
            elif data['tag'] == '3aConverged':
                converged = True
            elif data['tag'] == '3aDone':
                break
            else:
                raise its.error.Error('Invalid command response')
        if converged and not get_results:
            return None,None,None,None,None
        if (do_ae and ae_sens == None or (not mono_camera and do_awb and awb_gains == None)
                or do_af and af_dist == None or not converged):
            raise its.error.Error('3A failed to converge')
        return ae_sens, ae_exp, awb_gains, awb_transform, af_dist

    def is_stream_combination_supported(self, out_surfaces):
        """Query whether an output surface combination is supported by the camera device.

        This function hooks up to the isSessionConfigurationSupported() camera API
        to query whether a particular stream combination is supported.

        Refer to the do_capture function for the specification of the
        out_surfaces field.
        """
        cmd = {}
        cmd['cmdName'] = 'isStreamCombinationSupported'

        if not isinstance(out_surfaces, list):
            cmd['outputSurfaces'] = [out_surfaces]
        else:
            cmd['outputSurfaces'] = out_surfaces
        formats = [c['format'] if 'format' in c else 'yuv'
                   for c in cmd['outputSurfaces']]
        formats = [s if s != 'jpg' else 'jpeg' for s in formats]

        self.sock.send(json.dumps(cmd) + '\n')

        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'streamCombinationSupport':
            raise its.error.Error('Failed to query stream combination')

        return data['strValue'] == 'supportedCombination'
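
    # Illustrative sketch combining the two calls above before a capture; the
    # 640x480 YUV surface is an arbitrary example value:
    #
    #     with its.device.ItsSession() as cam:
    #         sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
    #         fmt = {"format": "yuv", "width": 640, "height": 480}
    #         if cam.is_stream_combination_supported(fmt):
    #             req = {"android.sensor.sensitivity": sens,
    #                    "android.sensor.exposureTime": exp}
    #             cap = cam.do_capture(req, fmt)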

    def do_capture(self, cap_request,
                   out_surfaces=None, reprocess_format=None, repeat_request=None):
        """Issue capture request(s), and read back the image(s) and metadata.

        The main top-level function for capturing one or more images using the
        device. Captures a single image if cap_request is a single object, and
        captures a burst if it is a list of objects.

        The optional repeat_request field can be used to assign a repeating
        request list that is run in the background for 3 seconds to warm up the
        capture pipeline before the actual capture starts. The repeat_requests
        will be run on a 640x480 YUV surface without sending any data back. The
        caller needs to make sure the stream configuration defined by
        out_surfaces and repeat_request is valid, or do_capture may fail
        because the device does not support such a stream configuration.

        The out_surfaces field can specify the width(s), height(s), and
        format(s) of the captured image. The formats may be "yuv", "jpeg",
        "dng", "raw", "raw10", "raw12", "rawStats" or "y8". The default is a
        YUV420 frame ("yuv") corresponding to a full sensor frame.

        Optionally the out_surfaces field can specify physical camera id(s) if
        the current camera device is a logical multi-camera. The physical
        camera id must refer to a physical camera backing this logical camera
        device.

        Note that one or more surfaces can be specified, allowing a capture to
        request images back in multiple formats (e.g. raw+yuv, raw+jpeg,
        yuv+jpeg, raw+yuv+jpeg). If the size is omitted for a surface, the
        default is the largest resolution available for the format of that
        surface. At most one output surface can be specified for a given
        format, and raw+dng, raw10+dng, and raw+raw10 are not supported as
        combinations.

        If reprocess_format is not None, for each request an intermediate
        buffer of the given reprocess_format will be captured from the camera,
        and the intermediate buffer will be reprocessed to the output surfaces.
        The following settings will be turned off when capturing the
        intermediate buffer and will be applied when reprocessing the
        intermediate buffer:
            1. android.noiseReduction.mode
            2. android.edge.mode
            3. android.reprocess.effectiveExposureFactor

        Supported reprocess formats are "yuv" and "private". Supported output
        surface formats when reprocessing is enabled are "yuv" and "jpeg".

        Example of a single capture request:

            {
                "android.sensor.exposureTime": 100*1000*1000,
                "android.sensor.sensitivity": 100
            }

        Example of a list of capture requests:

            [
                {
                    "android.sensor.exposureTime": 100*1000*1000,
                    "android.sensor.sensitivity": 100
                },
                {
                    "android.sensor.exposureTime": 100*1000*1000,
                    "android.sensor.sensitivity": 200
                }
            ]

        Examples of output surface specifications:

            {
                "width": 640,
                "height": 480,
                "format": "yuv"
            }

            [
                {
                    "format": "jpeg"
                },
                {
                    "format": "raw"
                }
            ]

        The following variables defined in this class are shortcuts for
        specifying one or more formats where each output is the full size for
        that format; they can be used as values for the out_surfaces arguments:

            CAP_RAW
            CAP_DNG
            CAP_YUV
            CAP_JPEG
            CAP_RAW_YUV
            CAP_DNG_YUV
            CAP_RAW_JPEG
            CAP_DNG_JPEG
            CAP_YUV_JPEG
            CAP_RAW_YUV_JPEG
            CAP_DNG_YUV_JPEG

        If multiple formats are specified, then this function returns multiple
        capture objects, one for each requested format. If multiple formats and
        multiple captures (i.e. a burst) are specified, then this function
        returns multiple lists of capture objects. In both cases, the order of
        the returned objects matches the order of the requested formats in the
        out_surfaces parameter. For example:

            yuv_cap            = do_capture( req1 )
            yuv_cap            = do_capture( req1, yuv_fmt )
            yuv_cap,  raw_cap  = do_capture( req1, [yuv_fmt,raw_fmt] )
            yuv_caps           = do_capture( [req1,req2], yuv_fmt )
            yuv_caps, raw_caps = do_capture( [req1,req2], [yuv_fmt,raw_fmt] )

        The "rawStats" format processes the raw image and returns a new image
        of statistics from the raw image. The format takes additional keys,
        "gridWidth" and "gridHeight", which are the size of the grid cells in a
        2D grid over the raw image. For each grid cell, the mean and variance
        of each raw channel is computed, and the do_capture call returns two
        4-element float images of dimensions (rawWidth / gridWidth,
        rawHeight / gridHeight), concatenated back-to-back, where the first
        image contains the 4-channel means and the second contains the
        4-channel variances. Note that only pixels in the active array crop
        region are used; pixels outside this region (for example optical black
        rows) are cropped out before the gridding and statistics computation is
        performed.

        For the rawStats format, if the gridWidth is not provided then the raw
        image width is used as the default, and similarly for gridHeight. With
        this, the following is an example of an output description that
        computes the mean and variance across each image row:

            {
                "gridHeight": 1,
                "format": "rawStats"
            }

        Args:
            cap_request: The Python dict/list specifying the capture(s), which
                will be converted to JSON and sent to the device.
            out_surfaces: (Optional) specifications of the output image formats
                and sizes to use for each capture.
            reprocess_format: (Optional) The reprocessing format. If not None,
                reprocessing will be enabled.
            repeat_request: (Optional) repeating request list used to warm up
                the capture pipeline, as described above.

        Returns:
            An object, list of objects, or list of lists of objects, where each
            object contains the following fields:
            * data: the image data as a numpy array of bytes.
            * width: the width of the captured image.
            * height: the height of the captured image.
            * format: the image format, one of [
                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
            * metadata: the capture result object (Python dictionary).
        """
        cmd = {}
        if reprocess_format != None:
            cmd["cmdName"] = "doReprocessCapture"
            cmd["reprocessFormat"] = reprocess_format
        else:
            cmd["cmdName"] = "doCapture"

        if repeat_request is not None and reprocess_format is not None:
            raise its.error.Error('repeating request + reprocessing is not supported')

        if repeat_request is None:
            cmd["repeatRequests"] = []
        elif not isinstance(repeat_request, list):
            cmd["repeatRequests"] = [repeat_request]
        else:
            cmd["repeatRequests"] = repeat_request

        if not isinstance(cap_request, list):
            cmd["captureRequests"] = [cap_request]
        else:
            cmd["captureRequests"] = cap_request
        if out_surfaces is not None:
            if not isinstance(out_surfaces, list):
                cmd["outputSurfaces"] = [out_surfaces]
            else:
                cmd["outputSurfaces"] = out_surfaces
            formats = [c["format"] if "format" in c else "yuv"
                       for c in cmd["outputSurfaces"]]
            formats = [s if s != "jpg" else "jpeg" for s in formats]
        else:
            max_yuv_size = its.objects.get_available_output_sizes(
                "yuv", self.props)[0]
            formats = ['yuv']
            cmd["outputSurfaces"] = [{"format": "yuv",
                                      "width" : max_yuv_size[0],
                                      "height": max_yuv_size[1]}]

        ncap = len(cmd["captureRequests"])
        nsurf = 1 if out_surfaces is None else len(cmd["outputSurfaces"])

        cam_ids = []
        bufs = {}
        yuv_bufs = {}
        for i,s in enumerate(cmd["outputSurfaces"]):
            if self._hidden_physical_id:
                s['physicalCamera'] = self._hidden_physical_id

            if 'physicalCamera' in s:
                cam_id = s['physicalCamera']
            else:
                cam_id = self._camera_id

            if cam_id not in cam_ids:
                cam_ids.append(cam_id)
                bufs[cam_id] = {"raw":[], "raw10":[], "raw12":[],
                                "rawStats":[], "dng":[], "jpeg":[], "y8":[]}

        for cam_id in cam_ids:
            # Only allow yuv output to multiple targets
            if cam_id == self._camera_id:
                yuv_surfaces = [s for s in cmd["outputSurfaces"]
                                if s["format"]=="yuv"
                                and "physicalCamera" not in s]
                formats_for_id = [s["format"] for s in cmd["outputSurfaces"]
                                  if "physicalCamera" not in s]
            else:
                yuv_surfaces = [s for s in cmd["outputSurfaces"]
                                if s["format"]=="yuv"
                                and "physicalCamera" in s
                                and s["physicalCamera"] == cam_id]
                formats_for_id = [s["format"] for s in cmd["outputSurfaces"]
                                  if "physicalCamera" in s
                                  and s["physicalCamera"] == cam_id]

            n_yuv = len(yuv_surfaces)
            # Compute the buffer size of YUV targets
            yuv_maxsize_1d = 0
            for s in yuv_surfaces:
                if not ("width" in s and "height" in s):
                    if self.props is None:
                        raise its.error.Error('Camera props are unavailable')
                    yuv_maxsize_2d = its.objects.get_available_output_sizes(
                        "yuv", self.props)[0]
                    yuv_maxsize_1d = yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3 / 2
                    break
            yuv_sizes = [c["width"]*c["height"]*3/2
                         if "width" in c and "height" in c
                         else yuv_maxsize_1d
                         for c in yuv_surfaces]
            # Currently we don't pass enough metadata from ItsService to
            # distinguish different yuv streams of the same buffer size.
            if len(yuv_sizes) != len(set(yuv_sizes)):
                raise its.error.Error(
                    'ITS does not support yuv outputs of same buffer size')
            if len(formats_for_id) > len(set(formats_for_id)):
                if n_yuv != len(formats_for_id) - len(set(formats_for_id)) + 1:
                    raise its.error.Error('Duplicate format requested')

            yuv_bufs[cam_id] = {size:[] for size in yuv_sizes}

        raw_formats = 0
        raw_formats += 1 if "dng" in formats else 0
        raw_formats += 1 if "raw" in formats else 0
        raw_formats += 1 if "raw10" in formats else 0
        raw_formats += 1 if "raw12" in formats else 0
        raw_formats += 1 if "rawStats" in formats else 0
        if raw_formats > 1:
            raise its.error.Error('Different raw formats not supported')

        # Detect long exposure time and set timeout accordingly
        longest_exp_time = 0
        for req in cmd["captureRequests"]:
            if "android.sensor.exposureTime" in req and \
                    req["android.sensor.exposureTime"] > longest_exp_time:
                longest_exp_time = req["android.sensor.exposureTime"]

        extended_timeout = longest_exp_time / self.SEC_TO_NSEC + \
                           self.SOCK_TIMEOUT
        if repeat_request:
            extended_timeout += self.EXTRA_SOCK_TIMEOUT
        self.sock.settimeout(extended_timeout)

        print "Capturing %d frame%s with %d format%s [%s]" % (
            ncap, "s" if ncap>1 else "", nsurf, "s" if nsurf>1 else "",
            ",".join(formats))
        self.sock.send(json.dumps(cmd) + "\n")

        # Wait for ncap*nsurf images and ncap metadata responses.
        # Assume that captures come out in the same order as requested in
        # the burst, however individual images of different formats can come
        # out in any order for that capture.
        nbufs = 0
        mds = []
        physical_mds = []
        widths = None
        heights = None
        while nbufs < ncap*nsurf or len(mds) < ncap:
            jsonObj,buf = self.__read_response_from_socket()
            if jsonObj['tag'] in ['jpegImage', 'rawImage', 'raw10Image',
                                  'raw12Image', 'rawStatsImage', 'dngImage',
                                  'y8Image'] and buf is not None:
                fmt = jsonObj['tag'][:-5]
                bufs[self._camera_id][fmt].append(buf)
                nbufs += 1
            elif jsonObj['tag'] == 'yuvImage':
                buf_size = numpy.product(buf.shape)
                yuv_bufs[self._camera_id][buf_size].append(buf)
                nbufs += 1
            elif jsonObj['tag'] == 'captureResults':
                mds.append(jsonObj['objValue']['captureResult'])
                physical_mds.append(jsonObj['objValue']['physicalResults'])
                outputs = jsonObj['objValue']['outputs']
                widths = [out['width'] for out in outputs]
                heights = [out['height'] for out in outputs]
            else:
                tagString = unicodedata.normalize(
                    'NFKD', jsonObj['tag']).encode('ascii', 'ignore')
                for x in ['jpegImage', 'rawImage', 'raw10Image', 'raw12Image',
                          'rawStatsImage', 'yuvImage']:
                    if tagString.startswith(x):
                        if x == 'yuvImage':
                            physicalId = jsonObj['tag'][len(x):]
                            if physicalId in cam_ids:
                                buf_size = numpy.product(buf.shape)
                                yuv_bufs[physicalId][buf_size].append(buf)
                                nbufs += 1
                        else:
                            physicalId = jsonObj['tag'][len(x):]
                            if physicalId in cam_ids:
                                fmt = x[:-5]
                                bufs[physicalId][fmt].append(buf)
                                nbufs += 1
        rets = []
        for j,fmt in enumerate(formats):
            objs = []
            if "physicalCamera" in cmd["outputSurfaces"][j]:
                cam_id = cmd["outputSurfaces"][j]["physicalCamera"]
            else:
                cam_id = self._camera_id

            for i in range(ncap):
                obj = {}
                obj["width"] = widths[j]
                obj["height"] = heights[j]
                obj["format"] = fmt
                if cam_id == self._camera_id:
                    obj["metadata"] = mds[i]
                else:
                    for physical_md in physical_mds[i]:
                        if cam_id in physical_md:
                            obj["metadata"] = physical_md[cam_id]
                            break

                if fmt == "yuv":
                    buf_size = widths[j] * heights[j] * 3 / 2
                    obj["data"] = yuv_bufs[cam_id][buf_size][i]
                else:
                    obj["data"] = bufs[cam_id][fmt][i]
                objs.append(obj)
            rets.append(objs if ncap > 1 else objs[0])
        self.sock.settimeout(self.SOCK_TIMEOUT)
        if len(rets) > 1 or (isinstance(rets[0], dict) and
                             isinstance(cap_request, list)):
            return rets
        else:
            return rets[0]


def do_capture_with_latency(cam, req, sync_latency, fmt=None):
    """Helper function to take enough frames with do_capture to allow sync latency.

    Args:
        cam: camera object
        req: request for camera
        sync_latency: integer number of frames
        fmt: format for the capture
    Returns:
        single capture with the unsettled frames discarded
    """
    caps = cam.do_capture([req]*(sync_latency+1), fmt)
    return caps[-1]
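
# Illustrative usage of the helper above; sync_latency=3 is an arbitrary
# placeholder for the number of frames the pipeline needs to settle:
#
#     cap = do_capture_with_latency(cam, req, 3, cam.CAP_YUV)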


def get_device_id():
    """Return the ID of the device that the test is running on.

    Return the device ID provided in the command line if it's connected. If no
    device ID is provided in the command line and there is only one device
    connected, return the device ID by parsing the result of "adb devices".
    Also, if the environment variable ANDROID_SERIAL is set, use it as the
    device ID. When both ANDROID_SERIAL and the device argument are present,
    the device argument takes priority.

    Raise an exception if no device is connected; or the device ID provided in
    the command line is not connected; or no device ID is provided in the
    command line or environment variable and more than one device is connected.

    Returns:
        Device ID string.
    """
    device_id = None

    # Check if the device id is set in the environment.
    if "ANDROID_SERIAL" in os.environ:
        device_id = os.environ["ANDROID_SERIAL"]

    for s in sys.argv[1:]:
        if s[:7] == "device=" and len(s) > 7:
            device_id = str(s[7:])

    # Get a list of connected devices
    devices = []
    command = "adb devices"
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    output, error = proc.communicate()
    for line in output.split(os.linesep):
        device_info = line.split()
        if len(device_info) == 2 and device_info[1] == "device":
            devices.append(device_info[0])

    if len(devices) == 0:
        raise its.error.Error("No device is connected!")
    elif device_id is not None and device_id not in devices:
        raise its.error.Error(device_id + " is not connected!")
    elif device_id is None and len(devices) >= 2:
        raise its.error.Error("More than 1 device is connected. " +
                              "Use device=<device_id> to specify a device to test.")
    elif len(devices) == 1:
        device_id = devices[0]

    return device_id

def report_result(device_id, camera_id, results):
    """Send a pass/fail result to the device, via an intent.

    Args:
        device_id: The ID string of the device to report the results to.
        camera_id: The ID string of the camera for which to report pass/fail.
        results: a dictionary that maps each ITS scene to the result/summary
            of the current ITS run. See the test_report_result unit test for
            an example.
    Returns:
        Nothing.
    """
    ACTIVITY_START_WAIT = 1.5  # seconds
    adb = "adb -s " + device_id

    # Start ItsTestActivity to receive test results
    cmd = "%s shell am start %s --activity-brought-to-front" % (
        adb, ItsSession.ITS_TEST_ACTIVITY)
    _run(cmd)
    time.sleep(ACTIVITY_START_WAIT)

    # Validate/process results argument
    for scene in results:
        result_key = ItsSession.RESULT_KEY
        summary_key = ItsSession.SUMMARY_KEY
        if result_key not in results[scene]:
            raise its.error.Error('ITS result not found for ' + scene)
        if results[scene][result_key] not in ItsSession.RESULT_VALUES:
            raise its.error.Error('Unknown ITS result for %s: %s' % (
                scene, results[scene][result_key]))
        if summary_key in results[scene]:
            device_summary_path = "/sdcard/its_camera%s_%s.txt" % (
                camera_id, scene)
            _run("%s push %s %s" % (
                adb, results[scene][summary_key], device_summary_path))
            results[scene][summary_key] = device_summary_path

    json_results = json.dumps(results)
    cmd = "%s shell am broadcast -a %s --es %s %s --es %s %s --es %s \'%s\'" % (
        adb, ItsSession.ACTION_ITS_RESULT,
        ItsSession.EXTRA_VERSION, ItsSession.CURRENT_ITS_VERSION,
        ItsSession.EXTRA_CAMERA_ID, camera_id,
        ItsSession.EXTRA_RESULTS, json_results)
    if len(cmd) > 4095:
        print "ITS command string might be too long! len:", len(cmd)
    _run(cmd)

def adb_log(device_id, msg):
    """Send a log message to adb logcat.

    Args:
        device_id: The ID string of the adb device.
        msg: the message string to be sent to logcat.

    Returns:
        Nothing.
    """
    adb = "adb -s " + device_id
    cmd = "%s shell log -p i -t \"ItsTestHost\" %s" % (adb, msg)
    _run(cmd)

def get_device_fingerprint(device_id):
    """Return the build fingerprint of the device that the test is running on.

    Returns:
        Device build fingerprint string.
    """
    device_bfp = None

    # Query ro.build.fingerprint from the device.
    com = ('adb -s %s shell getprop | grep ro.build.fingerprint' % device_id)
    proc = subprocess.Popen(com.split(), stdout=subprocess.PIPE)
    output, error = proc.communicate()
    assert error is None

    lst = string.split(
        string.replace(
            string.replace(
                string.replace(output, '\n', ''),
                '[', ''),
            ']', ''),
        ' ')

    if lst[0].find('ro.build.fingerprint') != -1:
        device_bfp = lst[1]

    return device_bfp

def parse_camera_ids(ids):
    """Parse the string of camera IDs into an array of CameraIdCombo tuples.
    """
    CameraIdCombo = namedtuple('CameraIdCombo', ['id', 'sub_id'])
    id_combos = []
    for one_id in ids:
        one_combo = one_id.split(ItsSession.CAMERA_ID_TOKENIZER)
        if len(one_combo) == 1:
            id_combos.append(CameraIdCombo(one_combo[0], None))
        elif len(one_combo) == 2:
            id_combos.append(CameraIdCombo(one_combo[0], one_combo[1]))
        else:
            assert(False), 'Camera id parameters must be either ID, or ID:SUB_ID'
    return id_combos


def get_build_sdk_version(device_id=None):
    """Get the build version of the device."""
    if not device_id:
        device_id = get_device_id()
    cmd = 'adb -s %s shell getprop ro.build.version.sdk' % device_id
    try:
        build_sdk_version = int(subprocess.check_output(cmd.split()).rstrip())
        print 'Build SDK version: %d' % build_sdk_version
    except (subprocess.CalledProcessError, ValueError):
        print 'No build_sdk_version.'
        assert 0
    return build_sdk_version


def get_first_api_level(device_id=None):
    """Get the first API level for the device."""
    if not device_id:
        device_id = get_device_id()
    cmd = 'adb -s %s shell getprop ro.product.first_api_level' % device_id
    try:
        first_api_level = int(subprocess.check_output(cmd.split()).rstrip())
        print 'First API level: %d' % first_api_level
    except (subprocess.CalledProcessError, ValueError):
        print 'No first_api_level. Setting to build version.'
        first_api_level = get_build_sdk_version(device_id)
    return first_api_level


def _run(cmd):
    """Replacement for os.system, with hiding of stdout+stderr messages.
    """
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(
            cmd.split(), stdout=devnull, stderr=subprocess.STDOUT)


class __UnitTest(unittest.TestCase):
    """Run a suite of unit tests on this module.
    """

    """
    # TODO: this test currently needs a connected device to pass
    # Need to remove that dependency before enabling the test
    def test_report_result(self):
        device_id = get_device_id()
        camera_id = "1"
        result_key = ItsSession.RESULT_KEY
        results = {"scene0":{result_key:"PASS"},
                   "scene1":{result_key:"PASS"},
                   "scene2":{result_key:"PASS"},
                   "scene3":{result_key:"PASS"},
                   "sceneNotExist":{result_key:"FAIL"}}
        report_result(device_id, camera_id, results)
    """

if __name__ == '__main__':
    unittest.main()