# Copyright 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify multi-camera alignment using internal parameters."""


import logging
import math
import os.path

import cv2
from mobly import test_runner
import numpy as np

import its_base_test
import camera_properties_utils
import capture_request_utils
import image_processing_utils
import its_session_utils
import opencv_processing_utils

_ALIGN_TOL_MM = 5.0  # mm
_ALIGN_TOL = 0.01  # multiplied by sensor diagonal to convert to pixels
_CHART_DISTANCE_RTOL = 0.1
_CIRCLE_COLOR = 0  # [0: black, 255: white]
_CIRCLE_MIN_AREA = 0.005  # multiplied by image size
_CIRCLE_RTOL = 0.1  # 10%
_CM_TO_M = 1E-2
_FMT_CODE_RAW = 0x20
_FMT_CODE_YUV = 0x23
_LENS_FACING_BACK = 1  # 0: FRONT, 1: BACK, 2: EXTERNAL
_M_TO_MM = 1E3
_MM_TO_UM = 1E3
_NAME = os.path.splitext(os.path.basename(__file__))[0]
_REFERENCE_GYRO = 1
_REFERENCE_UNDEFINED = 2
_TEST_REQUIRED_MPC = 33
_TRANS_MATRIX_REF = np.array([0, 0, 0])  # translation matrix for ref cam is 000


def convert_cap_and_prep_img(cap, props, fmt, img_name, debug):
  """Convert the capture to an RGB image and prepare it for processing.

  Args:
    cap: capture element
    props: dict of capture properties
    fmt: capture format ('raw' or 'yuv')
    img_name: name to save image as
    debug: boolean for debug mode

  Returns:
    img uint8 numpy array
  """

  img = image_processing_utils.convert_capture_to_rgb_image(cap, props=props)

  # save images if debug
  if debug:
    image_processing_utils.write_image(img, img_name)

  # convert [0, 1] image to [0, 255] and cast as uint8
  img = image_processing_utils.convert_image_to_uint8(img)

  # scale to match calibration data if RAW
  if fmt == 'raw':
    img = cv2.resize(img, None, fx=2, fy=2)

  return img


def calc_pixel_size(props):
  """Calculate the average pixel size in um from camera properties.

  Args:
    props: dict; physical camera properties

  Returns:
    average of the width and height pixel pitch in um
  """
  ar = props['android.sensor.info.pixelArraySize']
  sensor_size = props['android.sensor.info.physicalSize']
  pixel_size_w = sensor_size['width'] / ar['width']
  pixel_size_h = sensor_size['height'] / ar['height']
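  # Worked example with hypothetical values (not from a real device): a
  # 6.4 x 4.8 mm sensor behind a 4000 x 3000 pixel array gives
  # 6.4 / 4000 = 4.8 / 3000 = 0.0016 mm per pixel, so the value returned
  # below is (0.0016 + 0.0016) / 2 * 1E3 = 1.6 um.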
  logging.debug('pixel size(um): %.2f x %.2f',
                pixel_size_w * _MM_TO_UM, pixel_size_h * _MM_TO_UM)
  return (pixel_size_w + pixel_size_h) / 2 * _MM_TO_UM


def select_ids_to_test(ids, props, chart_distance):
  """Determine the best 2 cameras to test for the rig used.

  Cameras are pre-filtered to only include supportable cameras.
  Supportable cameras are: YUV(RGB), RAW(Bayer)

  Args:
    ids: unicode string; physical camera ids
    props: dict; physical camera properties dictionary
    chart_distance: float; distance to chart in meters
  Returns:
    test_ids to be tested
  """
  chart_distance = abs(chart_distance)*100  # convert m to cm
  test_ids = []
  for i in ids:
    sensor_size = props[i]['android.sensor.info.physicalSize']
    focal_l = props[i]['android.lens.info.availableFocalLengths'][0]
    diag = math.sqrt(sensor_size['height'] ** 2 + sensor_size['width'] ** 2)
    fov = round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2)
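    # e.g. with hypothetical values: an 8 mm sensor diagonal and a 4.38 mm
    # focal length give fov = 2*atan(8 / (2*4.38)) ~= 84.8 degrees.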
    logging.debug('Camera: %s, FoV: %.2f, chart_distance: %.1fcm', i, fov,
                  chart_distance)
    # determine best combo with rig used or recommend different rig
    if (opencv_processing_utils.FOV_THRESH_TELE < fov <
        opencv_processing_utils.FOV_THRESH_WFOV):
      test_ids.append(i)  # RFoV camera
    elif fov < opencv_processing_utils.FOV_THRESH_TELE40:
      logging.debug(
          'Skipping camera. Not appropriate for multi-camera testing.')
      continue  # super-TELE camera
    elif (fov <= opencv_processing_utils.FOV_THRESH_TELE and
          math.isclose(chart_distance,
                       opencv_processing_utils.CHART_DISTANCE_RFOV,
                       rel_tol=_CHART_DISTANCE_RTOL)):
      test_ids.append(i)  # TELE camera in RFoV rig
    elif (fov >= opencv_processing_utils.FOV_THRESH_WFOV and
          math.isclose(chart_distance,
                       opencv_processing_utils.CHART_DISTANCE_WFOV,
                       rel_tol=_CHART_DISTANCE_RTOL)):
      test_ids.append(i)  # WFoV camera in WFoV rig
    else:
      logging.debug('Skipping camera. Not appropriate for test rig.')

  if len(test_ids) < 2:
    raise AssertionError('Error: started with 2+ cameras, reduced to <2 based '
                         f'on FoVs. Wrong test rig? test_ids: {test_ids}')
  return test_ids[0:2]


def determine_valid_out_surfaces(cam, props, fmt, cap_camera_ids, sizes):
  """Determine valid output surfaces for captures.

  Args:
    cam:                obj; camera object
    props:              dict; props for the physical cameras
    fmt:                str; capture format ('yuv' or 'raw')
    cap_camera_ids:     list; camera capture ids
    sizes:              dict; valid physical sizes for the cap_camera_ids

  Returns:
    valid out_surfaces
  """
  valid_stream_combo = False

  # try simultaneous capture
  w, h = capture_request_utils.get_available_output_sizes('yuv', props)[0]
  out_surfaces = [{'format': 'yuv', 'width': w, 'height': h},
                  {'format': fmt, 'physicalCamera': cap_camera_ids[0],
                   'width': sizes[cap_camera_ids[0]][0],
                   'height': sizes[cap_camera_ids[0]][1]},
                  {'format': fmt, 'physicalCamera': cap_camera_ids[1],
                   'width': sizes[cap_camera_ids[1]][0],
                   'height': sizes[cap_camera_ids[1]][1]}]
  valid_stream_combo = cam.is_stream_combination_supported(out_surfaces)

  # try each camera individually
  if not valid_stream_combo:
    out_surfaces = []
    for cap_id in cap_camera_ids:
      out_surface = {'format': fmt, 'physicalCamera': cap_id,
                     'width': sizes[cap_id][0],
                     'height': sizes[cap_id][1]}
      valid_stream_combo = cam.is_stream_combination_supported(out_surface)
      if valid_stream_combo:
        out_surfaces.append(out_surface)
      else:
        camera_properties_utils.skip_unless(valid_stream_combo)

  return out_surfaces


def take_images(cam, caps, props, fmt, cap_camera_ids, out_surfaces,
                name_with_log_path, debug):
  """Do image captures.

  Args:
    cam: obj; camera object
    caps: dict; capture results indexed by (fmt, id)
    props: dict; props for the physical cameras
    fmt: str; capture format ('yuv' or 'raw')
    cap_camera_ids: list; camera capture ids
    out_surfaces: list; valid output surfaces for caps
    name_with_log_path: str; file name with location to save files
    debug: bool; determine if debug mode or not.

  Returns:
    caps: dict; capture information indexed by (fmt, cap_id)
  """

  logging.debug('out_surfaces: %s', str(out_surfaces))
  if len(out_surfaces) == 3:  # do simultaneous capture
    # Do 3A without getting the values
    cam.do_3a(lock_ae=True, lock_awb=True)
    req = capture_request_utils.auto_capture_request(props=props, do_af=True)
    _, caps[(fmt, cap_camera_ids[0])], caps[(fmt, cap_camera_ids[1])] = (
        cam.do_capture(req, out_surfaces))

  else:  # step through cameras individually
    for i, out_surface in enumerate(out_surfaces):
      # Do 3A without getting the values
      cam.do_3a(lock_ae=True, lock_awb=True)
      req = capture_request_utils.auto_capture_request(props=props, do_af=True)
      caps[(fmt, cap_camera_ids[i])] = cam.do_capture(req, out_surface)

  # save images if debug
  if debug:
    for i in [0, 1]:
      img = image_processing_utils.convert_capture_to_rgb_image(
          caps[(fmt, cap_camera_ids[i])], props=props[cap_camera_ids[i]])
      image_processing_utils.write_image(
          img, f'{name_with_log_path}_{fmt}_{cap_camera_ids[i]}.jpg')

  return caps


def undo_zoom(cap, circle):
  """Correct coordinates and size of circle for zoom.

  Assume that the maximum physical YUV image size is close to active array size.

  Args:
    cap: camera capture element
    circle: dict of circle values
  Returns:
    unzoomed circle dict
  """
  yuv_w = cap['width']
  yuv_h = cap['height']
  logging.debug('cap size: %d x %d', yuv_w, yuv_h)
  cr = cap['metadata']['android.scaler.cropRegion']
  cr_w = cr['right'] - cr['left']
  cr_h = cr['bottom'] - cr['top']

  # Offset due to aspect ratio difference of crop region and yuv
  # - fit yuv box inside of differently shaped cr box
  yuv_aspect = yuv_w / yuv_h
  relative_aspect = yuv_aspect / (cr_w/cr_h)
  if relative_aspect > 1:
    zoom_ratio = yuv_w / cr_w
    yuv_x = 0
    yuv_y = (cr_h - cr_w / yuv_aspect) / 2
  else:
    zoom_ratio = yuv_h / cr_h
    yuv_x = (cr_w - cr_h * yuv_aspect) / 2
    yuv_y = 0
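  # Worked example with hypothetical values: a 1920x1080 (16:9) YUV output
  # with a 1440x1080 (4:3) crop region gives relative_aspect = 1.33 > 1, so
  # zoom_ratio = 1920/1440 = 1.33, yuv_x = 0 and
  # yuv_y = (1080 - 1440/(16/9))/2 = 135 crop-region rows of letterbox.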

  circle['x'] = cr['left'] + yuv_x + circle['x'] / zoom_ratio
  circle['y'] = cr['top'] + yuv_y + circle['y'] / zoom_ratio
  circle['r'] = circle['r'] / zoom_ratio

  logging.debug(' Calculated zoom ratio: %.3f', zoom_ratio)
  logging.debug(' Corrected circle X: %.2f', circle['x'])
  logging.debug(' Corrected circle Y: %.2f', circle['y'])
  logging.debug(' Corrected circle radius: %.2f', circle['r'])

  return circle


def convert_to_world_coordinates(x, y, r, t, k, z_w):
  """Convert x,y coordinates to world coordinates.

  Conversion equation is:
  A = [[x*r[2][0] - dot(k_row0, r_col0), x*r[2][1] - dot(k_row0, r_col1)],
       [y*r[2][0] - dot(k_row1, r_col0), y*r[2][1] - dot(k_row1, r_col1)]]
  b = [[z_w*dot(k_row0, r_col2) + dot(k_row0, t) - x*(r[2][2]*z_w + t[2])],
       [z_w*dot(k_row1, r_col2) + dot(k_row1, t) - y*(r[2][2]*z_w + t[2])]]

  [[x_w], [y_w]] = inv(A) * b

  Args:
    x: x location in pixel space
    y: y location in pixel space
    r: rotation matrix
    t: translation matrix
    k: intrinsic matrix
    z_w: z distance in world space

  Returns:
    x_w:    x in meters in world space
    y_w:    y in meters in world space
  """
  c_1 = r[2, 2] * z_w + t[2]
  k_x1 = np.dot(k[0, :], r[:, 0])
  k_x2 = np.dot(k[0, :], r[:, 1])
  k_x3 = z_w * np.dot(k[0, :], r[:, 2]) + np.dot(k[0, :], t)
  k_y1 = np.dot(k[1, :], r[:, 0])
  k_y2 = np.dot(k[1, :], r[:, 1])
  k_y3 = z_w * np.dot(k[1, :], r[:, 2]) + np.dot(k[1, :], t)
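
  # Derivation sketch: the system solved below follows from the pinhole
  # projection k*(r*p_w + t) = s*[x, y, 1]^T with p_w = [x_w, y_w, z_w]^T.
  # Since the last row of an intrinsic matrix k is [0, 0, 1], the third row
  # fixes the scale s = r[2][0]*x_w + r[2][1]*y_w + c_1; substituting s into
  # the first two rows gives a * [[x_w], [y_w]] = b.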
  a = np.array([[x*r[2][0]-k_x1, x*r[2][1]-k_x2],
                [y*r[2][0]-k_y1, y*r[2][1]-k_y2]])
  b = np.array([[k_x3-x*c_1], [k_y3-y*c_1]])
  return (float(x) for x in np.dot(np.linalg.inv(a), b))


def convert_to_image_coordinates(p_w, r, t, k):
  """Convert a world-space point to image (pixel) coordinates.

  Args:
    p_w: list; [x, y, z] position in world space
    r: rotation matrix
    t: translation matrix
    k: intrinsic matrix

  Returns:
    x, y pixel coordinates after the homogeneous (perspective) divide
  """
  p_c = np.dot(r, p_w) + t
  p_h = np.dot(k, p_c)
  return p_h[0] / p_h[2], p_h[1] / p_h[2]


def define_reference_camera(pose_reference, cam_reference):
  """Determine the reference camera.

  Args:
    pose_reference: 0 for cameras, 1 for gyro
    cam_reference: dict with key of physical camera and value True/False
  Returns:
    i_ref: physical id of reference camera
    i_2nd: physical id of secondary camera
  """

  if pose_reference == _REFERENCE_GYRO:
    logging.debug('pose_reference is GYRO')
    keys = list(cam_reference.keys())
    i_ref = keys[0]  # pick first camera as ref
    i_2nd = keys[1]
  else:
    logging.debug('pose_reference is CAMERA')
    i_refs = [k for (k, v) in cam_reference.items() if v]
    i_ref = i_refs[0]
    if len(i_refs) > 1:
      logging.debug('Warning: more than 1 reference camera. Check translation '
                    'matrices. cam_reference: %s', str(cam_reference))
      i_2nd = i_refs[1]  # use second camera since both at same location
    else:
      i_2nd = next(k for (k, v) in cam_reference.items() if not v)
  return i_ref, i_2nd


class MultiCameraAlignmentTest(its_base_test.ItsBaseTest):
  """Test the multi camera system parameters related to camera spacing.

  Using the multi-camera physical cameras, take a picture of scene4
  (a black circle and surrounding square on a white background) with
  each of the physical cameras. Then find the circle center and radius. Using
  the parameters:
      android.lens.poseReference
      android.lens.poseTranslation
      android.lens.poseRotation
      android.lens.intrinsicCalibration
      android.lens.distortion (if available)
  project the circle center to the world coordinates for each camera.
  Compare the difference between the two cameras' circle centers in
  world coordinates.

  Reproject the world coordinates back to pixel coordinates and compare
  against originals as a correctness check.

  Compare the circle sizes if the focal lengths of the cameras are
  different using
      android.lens.availableFocalLengths.
  """

  def test_multi_camera_alignment(self):
    # capture images
    with its_session_utils.ItsSession(
        device_id=self.dut.serial,
        camera_id=self.camera_id,
        hidden_physical_id=self.hidden_physical_id) as cam:
      props = cam.get_camera_properties()
      name_with_log_path = os.path.join(self.log_path, _NAME)
      chart_distance = self.chart_distance * _CM_TO_M

      # check media performance class
      should_run = (camera_properties_utils.read_3a(props) and
                    camera_properties_utils.per_frame_control(props) and
                    camera_properties_utils.logical_multi_camera(props) and
                    camera_properties_utils.backward_compatible(props))
      media_performance_class = its_session_utils.get_media_performance_class(
          self.dut.serial)
      cameras_facing_same_direction = cam.get_facing_to_ids().get(
          props['android.lens.facing'], [])
      has_multiple_same_facing_cameras = len(cameras_facing_same_direction) > 1

      if (media_performance_class >= _TEST_REQUIRED_MPC and
          not should_run and
          cam.is_primary_camera() and
          has_multiple_same_facing_cameras):
        logging.error('Found multiple camera IDs %s facing in the same '
                      'direction as primary camera %s.',
                      cameras_facing_same_direction, self.camera_id)
        its_session_utils.raise_mpc_assertion_error(
            _TEST_REQUIRED_MPC, _NAME, media_performance_class)

      # check SKIP conditions
      camera_properties_utils.skip_unless(should_run)

      # load chart for scene
      its_session_utils.load_scene(
          cam, props, self.scene, self.tablet, self.chart_distance)

      debug = self.debug_mode
      pose_reference = props['android.lens.poseReference']

      # Convert chart_distance for lens facing back
      if props['android.lens.facing'] == _LENS_FACING_BACK:
        # API spec defines +z is pointing out from screen
        logging.debug('lens facing BACK')
        chart_distance *= -1

      # find physical camera IDs
      ids = camera_properties_utils.logical_multi_camera_physical_ids(props)
      physical_props = {}
      physical_ids = []
      physical_raw_ids = []
      for i in ids:
        physical_props[i] = cam.get_camera_properties_by_id(i)
        if physical_props[i][
            'android.lens.poseReference'] == _REFERENCE_UNDEFINED:
          continue
        # find YUV+RGB capable physical cameras
        if (camera_properties_utils.backward_compatible(physical_props[i]) and
            not camera_properties_utils.mono_camera(physical_props[i])):
          physical_ids.append(i)
        # find RAW+RGB capable physical cameras
        if (camera_properties_utils.backward_compatible(physical_props[i]) and
            not camera_properties_utils.mono_camera(physical_props[i]) and
            camera_properties_utils.raw16(physical_props[i])):
          physical_raw_ids.append(i)

      # determine formats and select cameras
      fmts = ['yuv']
      if len(physical_raw_ids) >= 2:
        fmts.insert(0, 'raw')  # add RAW to analysis if enough cameras
        logging.debug('Selecting RAW+RGB supported cameras')
        physical_raw_ids = select_ids_to_test(physical_raw_ids, physical_props,
                                              chart_distance)
      logging.debug('Selecting YUV+RGB cameras')
      camera_properties_utils.skip_unless(len(physical_ids) >= 2)
      physical_ids = select_ids_to_test(physical_ids, physical_props,
                                        chart_distance)

      # do captures for valid formats
      caps = {}
      for fmt in fmts:
        physical_sizes = {}
        capture_cam_ids = physical_ids
        fmt_code = _FMT_CODE_YUV
        if fmt == 'raw':
          capture_cam_ids = physical_raw_ids
          fmt_code = _FMT_CODE_RAW
        for physical_id in capture_cam_ids:
          configs = physical_props[physical_id][
              'android.scaler.streamConfigurationMap'][
                  'availableStreamConfigurations']
          fmt_configs = [cfg for cfg in configs if cfg['format'] == fmt_code]
          out_configs = [cfg for cfg in fmt_configs if not cfg['input']]
          out_sizes = [(cfg['width'], cfg['height']) for cfg in out_configs]
          # pick the largest available size by height
          physical_sizes[physical_id] = max(out_sizes, key=lambda item: item[1])

        out_surfaces = determine_valid_out_surfaces(
            cam, props, fmt, capture_cam_ids, physical_sizes)
        caps = take_images(cam, caps, physical_props, fmt, capture_cam_ids,
                           out_surfaces, name_with_log_path, debug)

    # process images for correctness
    for j, fmt in enumerate(fmts):
      size = {}
      k = {}
      cam_reference = {}
      r = {}
      t = {}
      circle = {}
      fl = {}
      sensor_diag = {}
      pixel_sizes = {}
      capture_cam_ids = physical_ids
      if fmt == 'raw':
        capture_cam_ids = physical_raw_ids
      logging.debug('Format: %s', str(fmt))
      for i in capture_cam_ids:
        # convert cap and prep image
        img_name = f'{name_with_log_path}_{fmt}_{i}.jpg'
        img = convert_cap_and_prep_img(
            caps[(fmt, i)], physical_props[i], fmt, img_name, debug)
        size[i] = (caps[fmt, i]['width'], caps[fmt, i]['height'])

        # load parameters for each physical camera
        if j == 0:
          logging.debug('Camera %s', i)
        k[i] = camera_properties_utils.get_intrinsic_calibration(
            physical_props[i], j == 0)
        r[i] = camera_properties_utils.get_rotation_matrix(
            physical_props[i], j == 0)
        t[i] = camera_properties_utils.get_translation_matrix(
            physical_props[i], j == 0)

        # API spec defines poseTranslation as the world coordinate p_w_cam of
        # optics center. When applying [R|t] to go from world coordinates to
        # camera coordinates, we need -R*p_w_cam of the coordinate reported in
        # metadata.
        # i.e. for a camera with optical center at world coordinate (5, 4, 3)
        # and identity rotation, to convert a world coordinate into the
        # camera's coordinate, we need a translation vector of [-5, -4, -3]
        # so that: [I|[-5, -4, -3]^T] * [5, 4, 3]^T = [0, 0, 0]^T
        t[i] = -1.0 * np.dot(r[i], t[i])
        if debug and j == 1:
          logging.debug('t: %s', str(t[i]))
          logging.debug('r: %s', str(r[i]))

        if (t[i] == _TRANS_MATRIX_REF).all():
          cam_reference[i] = True
        else:
          cam_reference[i] = False

        # Correct lens distortion to image (if available) and save before/after
        if (camera_properties_utils.distortion_correction(physical_props[i]) and
            camera_properties_utils.intrinsic_calibration(physical_props[i]) and
            fmt == 'raw'):
          cv2_distort = camera_properties_utils.get_distortion_matrix(
              physical_props[i])
          if cv2_distort is None:
            raise AssertionError(f'Camera {i} has no distortion matrix!')
          if not np.any(cv2_distort):
            logging.warning('Camera %s has distortion matrix of all zeroes', i)
          image_processing_utils.write_image(
              img/255, f'{name_with_log_path}_{fmt}_{i}.jpg')
          img = cv2.undistort(img, k[i], cv2_distort)
          image_processing_utils.write_image(
              img/255, f'{name_with_log_path}_{fmt}_correct_{i}.jpg')

        # Find the circles in grayscale image
        circle[i] = opencv_processing_utils.find_circle(
            img, f'{name_with_log_path}_{fmt}_gray_{i}.jpg',
            _CIRCLE_MIN_AREA, _CIRCLE_COLOR)
        logging.debug('Circle radius %s: %.2f', i, circle[i]['r'])

        # Undo zoom to image (if applicable).
        if fmt == 'yuv':
          circle[i] = undo_zoom(caps[(fmt, i)], circle[i])

        # Find focal length, pixel & sensor size
        fl[i] = physical_props[i]['android.lens.info.availableFocalLengths'][0]
        pixel_sizes[i] = calc_pixel_size(physical_props[i])
        sensor_diag[i] = math.sqrt(size[i][0] ** 2 + size[i][1] ** 2)

      i_ref, i_2nd = define_reference_camera(pose_reference, cam_reference)
      logging.debug('reference camera: %s, secondary camera: %s', i_ref, i_2nd)

      # Convert circle centers to real world coordinates
      x_w = {}
      y_w = {}
      for i in [i_ref, i_2nd]:
        x_w[i], y_w[i] = convert_to_world_coordinates(
            circle[i]['x'], circle[i]['y'], r[i], t[i], k[i], chart_distance)

      # Back convert to image coordinates for correctness check
      x_p = {}
      y_p = {}
      x_p[i_2nd], y_p[i_2nd] = convert_to_image_coordinates(
          [x_w[i_ref], y_w[i_ref], chart_distance], r[i_2nd], t[i_2nd],
          k[i_2nd])
      x_p[i_ref], y_p[i_ref] = convert_to_image_coordinates(
          [x_w[i_2nd], y_w[i_2nd], chart_distance], r[i_ref], t[i_ref],
          k[i_ref])

      # Summarize results
      for i in [i_ref, i_2nd]:
        logging.debug(' Camera: %s', i)
        logging.debug(' x, y (pixels): %.1f, %.1f', circle[i]['x'],
                      circle[i]['y'])
        logging.debug(' x_w, y_w (mm): %.2f, %.2f', x_w[i] * 1.0E3,
                      y_w[i] * 1.0E3)
        logging.debug(' x_p, y_p (pixels): %.1f, %.1f', x_p[i], y_p[i])

      # Check center locations
      err_mm = np.linalg.norm(np.array([x_w[i_ref], y_w[i_ref]]) -
                              np.array([x_w[i_2nd], y_w[i_2nd]])) * _M_TO_MM
      logging.debug('Center location err (mm): %.2f', err_mm)
      if err_mm > _ALIGN_TOL_MM:
        raise AssertionError(
            f'Centers {i_ref} <-> {i_2nd} too different! '
            f'val={err_mm:.2f}, ATOL={_ALIGN_TOL_MM} mm')

      # Check projections back into pixel space
      for i in [i_ref, i_2nd]:
        err = np.linalg.norm(np.array([circle[i]['x'], circle[i]['y']]) -
                             np.array([x_p[i], y_p[i]]).reshape(1, -1))
        logging.debug('Camera %s projection error (pixels): %.1f', i, err)
        tol = _ALIGN_TOL * sensor_diag[i]
        if err >= tol:
          raise AssertionError(f'Camera {i} projected location too different! '
                               f'diff={err:.2f}, ATOL={tol:.2f} pixels')

      # Check focal length and circle size if more than 1 focal length
      if len(fl) > 1:
        logging.debug('Circle radii (pixels); ref: %.1f, 2nd: %.1f',
                      circle[i_ref]['r'], circle[i_2nd]['r'])
        logging.debug('Focal lengths (mm); ref: %.2f, 2nd: %.2f',
                      fl[i_ref], fl[i_2nd])
        logging.debug('Pixel size (um); ref: %.2f, 2nd: %.2f',
                      pixel_sizes[i_ref], pixel_sizes[i_2nd])
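        # For co-located cameras the circle subtends the same angle regardless
        # of lens, so radius_px * pixel_size / focal_length (an angular size)
        # should match between the two cameras.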
        if not math.isclose(circle[i_ref]['r']*pixel_sizes[i_ref]/fl[i_ref],
                            circle[i_2nd]['r']*pixel_sizes[i_2nd]/fl[i_2nd],
                            rel_tol=_CIRCLE_RTOL):
          raise AssertionError(
              f'Circle size scales improperly! RTOL: {_CIRCLE_RTOL} '
              'Metric: radius*pixel_size/focal_length should be equal.')


if __name__ == '__main__':
  test_runner.main()