# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14"""Image processing utilities using openCV."""
15
16
import logging
import math
import os
import unittest
import cv2
import numpy

import capture_request_utils
import image_processing_utils

ANGLE_CHECK_TOL = 1  # degrees
ANGLE_NUM_MIN = 10  # Minimum number of angles for get_angle() to be valid


TEST_IMG_DIR = os.path.join(os.environ['CAMERA_ITS_TOP'], 'test_images')
CHART_FILE = os.path.join(TEST_IMG_DIR, 'ISO12233.png')
CHART_HEIGHT = 13.5  # cm
CHART_DISTANCE_RFOV = 31.0  # cm
CHART_DISTANCE_WFOV = 22.0  # cm
CHART_SCALE_START = 0.65
CHART_SCALE_STOP = 1.35
CHART_SCALE_STEP = 0.025

CIRCLE_AR_ATOL = 0.1  # circle aspect ratio tolerance
CIRCLISH_ATOL = 0.10  # contour area vs ideal circle area & aspect ratio TOL
CIRCLISH_LOW_RES_ATOL = 0.15  # loosen for low res images
CIRCLE_MIN_PTS = 20
CIRCLE_RADIUS_NUMPTS_THRESH = 2  # contour num_pts/radius: empirically ~3x
CIRCLE_COLOR_ATOL = 0.01  # circle color fill tolerance

CV2_CONTOUR_LINE_THICKNESS = 3  # for drawing contours if multiple circles found
CV2_RED = (255, 0, 0)  # color in cv2 to draw lines

FOV_THRESH_TELE25 = 25
FOV_THRESH_TELE40 = 40
FOV_THRESH_TELE = 60
FOV_THRESH_WFOV = 90

LOW_RES_IMG_THRESH = 320 * 240

RGB_GRAY_WEIGHTS = (0.299, 0.587, 0.114)  # RGB to grayscale conversion weights

SCALE_RFOV_IN_WFOV_BOX = 0.67
SCALE_TELE_IN_WFOV_BOX = 0.5
SCALE_TELE_IN_RFOV_BOX = 0.67
SCALE_TELE40_IN_RFOV_BOX = 0.5
SCALE_TELE25_IN_RFOV_BOX = 0.33

SQUARE_AREA_MIN_REL = 0.05  # Minimum size for square relative to image area
SQUARE_TOL = 0.05  # Square W vs H mismatch RTOL

VGA_HEIGHT = 480
VGA_WIDTH = 640


def find_all_contours(img):
  """Find all contours in an image, handling OpenCV version differences.

  Args:
    img: numpy image array to search for contours.

  Returns:
    List of contours as returned by cv2.findContours.
  """
  cv2_version = cv2.__version__
  logging.debug('cv2_version: %s', cv2_version)
  if cv2_version.startswith('3.'):  # OpenCV 3.x
    _, contours, _ = cv2.findContours(img, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
  else:  # OpenCV 2.x and 4.x
    contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  return contours


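# Example usage of find_all_contours(): it wraps cv2.findContours so callers
# do not need to handle the different return signatures of OpenCV 2.x, 3.x
# and 4.x. A minimal sketch, assuming img_bw is a binarized uint8 image
# (e.g. the output of cv2.threshold):
#
#   contours = find_all_contours(img_bw)
#   largest = max(contours, key=cv2.contourArea)

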
def calc_chart_scaling(chart_distance, camera_fov):
  """Returns chart scaling factor.

  Args:
   chart_distance: float; distance in cm from camera of displayed chart.
   camera_fov: float; camera field of view in degrees.

  Returns:
   chart_scaling: float; scaling factor for chart.
  """
  chart_scaling = 1.0
  camera_fov = float(camera_fov)
  if (FOV_THRESH_TELE < camera_fov < FOV_THRESH_WFOV and
      numpy.isclose(chart_distance, CHART_DISTANCE_WFOV, rtol=0.1)):
    chart_scaling = SCALE_RFOV_IN_WFOV_BOX
  elif (camera_fov <= FOV_THRESH_TELE and
        numpy.isclose(chart_distance, CHART_DISTANCE_WFOV, rtol=0.1)):
    chart_scaling = SCALE_TELE_IN_WFOV_BOX
  elif (camera_fov <= FOV_THRESH_TELE25 and
        (numpy.isclose(chart_distance, CHART_DISTANCE_RFOV, rtol=0.1) or
         chart_distance > CHART_DISTANCE_RFOV)):
    chart_scaling = SCALE_TELE25_IN_RFOV_BOX
  elif (camera_fov <= FOV_THRESH_TELE40 and
        numpy.isclose(chart_distance, CHART_DISTANCE_RFOV, rtol=0.1)):
    chart_scaling = SCALE_TELE40_IN_RFOV_BOX
  elif (camera_fov <= FOV_THRESH_TELE and
        numpy.isclose(chart_distance, CHART_DISTANCE_RFOV, rtol=0.1)):
    chart_scaling = SCALE_TELE_IN_RFOV_BOX
  return chart_scaling


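# Worked example for calc_chart_scaling(): a camera reporting a 75 degree FOV
# (between FOV_THRESH_TELE=60 and FOV_THRESH_WFOV=90) placed at roughly the
# WFOV chart distance of 22 cm falls into the first branch, so the chart is
# scaled by SCALE_RFOV_IN_WFOV_BOX (0.67). The 75.0/22.0 values are
# illustrative only:
#
#   scaling = calc_chart_scaling(chart_distance=22.0, camera_fov=75.0)
#   # scaling == SCALE_RFOV_IN_WFOV_BOX

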
def scale_img(img, scale=1.0):
  """Scale image based on a real number scale factor."""
  dim = (int(img.shape[1] * scale), int(img.shape[0] * scale))
  return cv2.resize(img.copy(), dim, interpolation=cv2.INTER_AREA)


def gray_scale_img(img):
  """Return gray scale version of image."""
  if len(img.shape) == 2:
    img_gray = img.copy()
  elif len(img.shape) == 3:
    if img.shape[2] == 1:
      img_gray = img[:, :, 0].copy()
    else:
      img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
  return img_gray


class Chart(object):
  """Definition for chart object.

  Defines the chart PNG reference file, height, distance, and scaling range.
  """

  def __init__(
      self,
      cam,
      props,
      log_path,
      chart_file=None,
      height=None,
      distance=None,
      scale_start=None,
      scale_stop=None,
      scale_step=None):
149    """Initial constructor for class.
150
151    Args:
152     cam: open ITS session
153     props: camera properties object
154     log_path: log path to store the captured images.
155     chart_file: str; absolute path to png file of chart
156     height: float; height in cm of displayed chart
157     distance: float; distance in cm from camera of displayed chart
158     scale_start: float; start value for scaling for chart search
159     scale_stop: float; stop value for scaling for chart search
160     scale_step: float; step value for scaling for chart search
161    """
    self._file = chart_file or CHART_FILE
    self._height = height or CHART_HEIGHT
    self._distance = distance or CHART_DISTANCE_RFOV
    self._scale_start = scale_start or CHART_SCALE_START
    self._scale_stop = scale_stop or CHART_SCALE_STOP
    self._scale_step = scale_step or CHART_SCALE_STEP
    self.locate(cam, props, log_path)

  def _set_scale_factors_to_one(self):
    """Set scale factors to 1.0 for skipped tests."""
    self.wnorm = 1.0
    self.hnorm = 1.0
    self.xnorm = 0.0
    self.ynorm = 0.0
    self.scale = 1.0

  def _calc_scale_factors(self, cam, props, fmt, log_path):
    """Take an image with s, e, & fd to find the chart location.

    Here s, e and fd are the capture's sensitivity, exposure time and focus
    distance.

    Args:
     cam: An open ITS session.
     props: Properties of cam.
     fmt: Image format for the capture.
     log_path: log path to save the captured images.

    Returns:
      template: numpy array; chart template for locator
      img_3a: numpy array; RGB image for chart location
      scale_factor: float; scaling factor for chart search
    """
    req = capture_request_utils.auto_capture_request()
    cap_chart = image_processing_utils.stationary_lens_cap(cam, req, fmt)
    img_3a = image_processing_utils.convert_capture_to_rgb_image(
        cap_chart, props)
    img_3a = image_processing_utils.rotate_img_per_argv(img_3a)
    af_scene_name = os.path.join(log_path, 'af_scene.jpg')
    image_processing_utils.write_image(img_3a, af_scene_name)
    template = cv2.imread(self._file, cv2.IMREAD_ANYDEPTH)
    focal_l = cap_chart['metadata']['android.lens.focalLength']
    pixel_pitch = (
        props['android.sensor.info.physicalSize']['height'] / img_3a.shape[0])
    logging.debug('Chart distance: %.2fcm', self._distance)
    logging.debug('Chart height: %.2fcm', self._height)
    logging.debug('Focal length: %.2fmm', focal_l)
    logging.debug('Pixel pitch: %.2fum', pixel_pitch * 1E3)
    logging.debug('Template height: %dpixels', template.shape[0])
    chart_pixel_h = self._height * focal_l / (self._distance * pixel_pitch)
    scale_factor = template.shape[0] / chart_pixel_h
    logging.debug('Chart/image scale factor = %.2f', scale_factor)
    return template, img_3a, scale_factor

  def locate(self, cam, props, log_path):
    """Find the chart in the image, and append location to chart object.

    Args:
      cam: Open ITS session.
      props: Camera properties object.
      log_path: log path to store the captured images.

    The values appended are:
      xnorm: float; [0, 1] left loc of chart in scene
      ynorm: float; [0, 1] top loc of chart in scene
      wnorm: float; [0, 1] width of chart in scene
      hnorm: float; [0, 1] height of chart in scene
      scale: float; scale factor to extract chart
    """
    fmt = {'format': 'yuv', 'width': VGA_WIDTH, 'height': VGA_HEIGHT}
    cam.do_3a()
    chart, scene, s_factor = self._calc_scale_factors(cam, props, fmt, log_path)
    scale_start = self._scale_start * s_factor
    scale_stop = self._scale_stop * s_factor
    scale_step = self._scale_step * s_factor
    offset = scale_step / 2
    self.scale = s_factor
    logging.debug('scale start: %.3f, stop: %.3f, step: %.3f',
                  scale_start, scale_stop, scale_step)
    logging.debug('Used offset of %.3f to include stop value.', offset)
    max_match = []
    # check for normalized image
    if numpy.amax(scene) <= 1.0:
      scene = (scene * 255.0).astype(numpy.uint8)
    scene_gray = gray_scale_img(scene)
    logging.debug('Finding chart in scene...')
    for scale in numpy.arange(scale_start, scale_stop + offset, scale_step):
      scene_scaled = scale_img(scene_gray, scale)
      if (scene_scaled.shape[0] < chart.shape[0] or
          scene_scaled.shape[1] < chart.shape[1]):
        logging.debug(
            'Skipped scale %.3f. scene_scaled shape: %s, chart shape: %s',
            scale, scene_scaled.shape, chart.shape)
        continue
      result = cv2.matchTemplate(scene_scaled, chart, cv2.TM_CCOEFF)
      _, opt_val, _, top_left_scaled = cv2.minMaxLoc(result)
      logging.debug(' scale factor: %.3f, opt val: %.f', scale, opt_val)
      max_match.append((opt_val, scale, top_left_scaled))

    # determine if optimization results are valid
    opt_values = [x[0] for x in max_match]
    if not opt_values or (2.0 * min(opt_values) > max(opt_values)):
      estring = ('Warning: unable to find chart in scene!\n'
                 'Check camera distance and self-reported '
                 'pixel pitch, focal length and hyperfocal distance.')
      logging.warning(estring)
      self._set_scale_factors_to_one()
    else:
      if (max(opt_values) == opt_values[0] or
          max(opt_values) == opt_values[-1]):
        estring = 'Warning: Chart is at extreme range of locator.'
        logging.warning(estring)
      # find max and draw bbox
      matched_scale_and_loc = max(max_match, key=lambda x: x[0])
      self.scale = matched_scale_and_loc[1]
      logging.debug('Optimum scale factor: %.3f', self.scale)
      top_left_scaled = matched_scale_and_loc[2]
      h, w = chart.shape
      bottom_right_scaled = (top_left_scaled[0] + w, top_left_scaled[1] + h)
      top_left = ((top_left_scaled[0] // self.scale),
                  (top_left_scaled[1] // self.scale))
      bottom_right = ((bottom_right_scaled[0] // self.scale),
                      (bottom_right_scaled[1] // self.scale))
      self.wnorm = (bottom_right[0] - top_left[0]) / scene.shape[1]
      self.hnorm = (bottom_right[1] - top_left[1]) / scene.shape[0]
      self.xnorm = top_left[0] / scene.shape[1]
      self.ynorm = top_left[1] / scene.shape[0]


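# Sketch of typical Chart usage, assuming an open ITS session `cam`, its
# camera properties `props` and a log directory `log_path` supplied by the
# calling test: constructing the object runs locate() and populates the
# normalized chart location fields.
#
#   chart = Chart(cam, props, log_path)
#   logging.debug('chart: xnorm %.3f, ynorm %.3f, wnorm %.3f, hnorm %.3f',
#                 chart.xnorm, chart.ynorm, chart.wnorm, chart.hnorm)

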
def component_shape(contour):
  """Measure the shape of a connected component.

  Args:
    contour: a single contour as returned by cv2.findContours; a list of
      pixel coordinates of the contour.

  Returns:
    Dict with the left-most, right-most, top-most and bottom-most pixel
    locations, the width and height, and the center pixel location of the
    contour.
  """
  shape = {'left': numpy.inf, 'right': 0, 'top': numpy.inf, 'bottom': 0,
           'width': 0, 'height': 0, 'ctx': 0, 'cty': 0}
  for pt in contour:
    if pt[0][0] < shape['left']:
      shape['left'] = pt[0][0]
    if pt[0][0] > shape['right']:
      shape['right'] = pt[0][0]
    if pt[0][1] < shape['top']:
      shape['top'] = pt[0][1]
    if pt[0][1] > shape['bottom']:
      shape['bottom'] = pt[0][1]
  shape['width'] = shape['right'] - shape['left'] + 1
  shape['height'] = shape['bottom'] - shape['top'] + 1
  shape['ctx'] = (shape['left'] + shape['right']) // 2
  shape['cty'] = (shape['top'] + shape['bottom']) // 2
  return shape


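# Worked example for component_shape(): a contour tracing a 3x3 pixel square
# with corners at (0, 0) and (2, 2) (cv2 contour points are [[x, y]] entries)
# yields left=0, right=2, top=0, bottom=2, width=3, height=3, ctx=1, cty=1.
#
#   contour = numpy.array([[[0, 0]], [[2, 0]], [[2, 2]], [[0, 2]]])
#   shape = component_shape(contour)

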
def find_circle_fill_metric(shape, img_bw, color):
  """Find the proportion of points matching a desired color on a shape's axes.

  Args:
    shape: dictionary returned by component_shape(...)
    img_bw: binarized numpy image array
    color: int; 0 (black) or 255 (white)

  Returns:
    float; fraction of points along the shape's center row and column that
    match the desired color.
  """
  matching = 0
  total = 0
  for y in range(shape['top'], shape['bottom']):
    total += 1
    matching += 1 if img_bw[y][shape['ctx']] == color else 0
  for x in range(shape['left'], shape['right']):
    total += 1
    matching += 1 if img_bw[shape['cty']][x] == color else 0
  logging.debug('Found %d matching points out of %d', matching, total)
  return matching / total


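# Note on find_circle_fill_metric(): it samples the binarized image along the
# shape's center column (x = ctx) and center row (y = cty); a solid shape of
# the requested color scores 1.0. find_circle() below accepts a contour only
# if this score is within CIRCLE_COLOR_ATOL of 1.0. A minimal sketch:
#
#   fill = find_circle_fill_metric(shape, img_bw, color=0)
#   # fill == 1.0 for a solid black circle centered in `shape`

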
def find_circle(img, img_name, min_area, color):
  """Find the circle in the test image.

  Args:
    img: numpy image array in RGB, with pixel values in [0,255].
    img_name: string with image info of format and size.
    min_area: float; minimum area of the circle to find, as a fraction of the
      image area.
    color: int; 0 (black) or 255 (white)

  Returns:
    circle = {'x', 'y', 'r', 'w', 'h', 'x_offset', 'y_offset'}
  """
  circle = {}
  img_size = img.shape
  if img_size[0]*img_size[1] >= LOW_RES_IMG_THRESH:
    circlish_atol = CIRCLISH_ATOL
  else:
    circlish_atol = CIRCLISH_LOW_RES_ATOL

  # convert to gray-scale image
  img_gray = numpy.dot(img[..., :3], RGB_GRAY_WEIGHTS)

  # otsu threshold to binarize the image
  _, img_bw = cv2.threshold(numpy.uint8(img_gray), 0, 255,
                            cv2.THRESH_BINARY + cv2.THRESH_OTSU)

  # find contours
  contours = find_all_contours(255-img_bw)

  # Check each contour and find the circle bigger than min_area
  num_circles = 0
  circle_contours = []
  logging.debug('Initial number of contours: %d', len(contours))
  for contour in contours:
    area = cv2.contourArea(contour)
    num_pts = len(contour)
    if (area > img_size[0]*img_size[1]*min_area and
        num_pts >= CIRCLE_MIN_PTS):
      shape = component_shape(contour)
      radius = (shape['width'] + shape['height']) / 4
      colour = img_bw[shape['cty']][shape['ctx']]
      circlish = (math.pi * radius**2) / area
      aspect_ratio = shape['width'] / shape['height']
      fill = find_circle_fill_metric(shape, img_bw, color)
      logging.debug('Potential circle found. radius: %.2f, color: %d, '
                    'circlish: %.3f, ar: %.3f, pts: %d, fill metric: %.3f',
                    radius, colour, circlish, aspect_ratio, num_pts, fill)
      if (colour == color and
          math.isclose(1.0, circlish, abs_tol=circlish_atol) and
          math.isclose(1.0, aspect_ratio, abs_tol=CIRCLE_AR_ATOL) and
          num_pts/radius >= CIRCLE_RADIUS_NUMPTS_THRESH and
          math.isclose(1.0, fill, abs_tol=CIRCLE_COLOR_ATOL)):
        circle_contours.append(contour)

        # Populate circle dictionary
        circle['x'] = shape['ctx']
        circle['y'] = shape['cty']
        circle['r'] = (shape['width'] + shape['height']) / 4
        circle['w'] = float(shape['width'])
        circle['h'] = float(shape['height'])
        circle['x_offset'] = (shape['ctx'] - img_size[1]//2) / circle['w']
        circle['y_offset'] = (shape['cty'] - img_size[0]//2) / circle['h']
        logging.debug('Num pts: %d', num_pts)
        logging.debug('Aspect ratio: %.3f', aspect_ratio)
        logging.debug('Circlish value: %.3f', circlish)
        logging.debug('Location: %.1f x %.1f', circle['x'], circle['y'])
        logging.debug('Radius: %.3f', circle['r'])
        logging.debug('Circle center position wrt image center: %.3f x %.3f',
                      circle['x_offset'], circle['y_offset'])
        num_circles += 1
        # if more than one circle found, break
        if num_circles == 2:
          break

  if num_circles == 0:
    image_processing_utils.write_image(img/255, img_name, True)
    raise AssertionError('No black circle detected. '
                         'Please take pictures according to instructions.')

  if num_circles > 1:
    image_processing_utils.write_image(img/255, img_name, True)
    cv2.drawContours(img, circle_contours, -1, CV2_RED,
                     CV2_CONTOUR_LINE_THICKNESS)
    img_name_parts = img_name.split('.')
    image_processing_utils.write_image(
        img/255, f'{img_name_parts[0]}_contours.{img_name_parts[1]}', True)
    raise AssertionError('More than 1 black circle detected. '
                         'Background of scene may be too complex.')

  return circle


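# Example usage of find_circle(), assuming an RGB numpy array `img` with
# values in [0, 255] that contains a single black circle; the min_area value
# of 0.01 (1% of the frame) is illustrative only:
#
#   circle = find_circle(img, 'scene.png', min_area=0.01, color=0)
#   logging.debug('center: (%d, %d), radius: %.1f, offsets: (%.3f, %.3f)',
#                 circle['x'], circle['y'], circle['r'],
#                 circle['x_offset'], circle['y_offset'])

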
def append_circle_center_to_img(circle, img, img_name):
  """Append circle center and image center to image and save image.

  Draws a line from the circle center to the image center and labels the
  end-points. Adjusts text positioning depending on the circle center wrt the
  image center, moving the text left/right by about half as much as up/down
  for visual aesthetics.

  Args:
    circle: dict with circle location vals.
    img: numpy float image array in RGB, with pixel values in [0,255].
    img_name: string with image info of format and size.
  """
  line_width_scaling_factor = 500
  text_move_scaling_factor = 3
  img_size = img.shape
  img_center_x = img_size[1]//2
  img_center_y = img_size[0]//2

  # draw line from circle to image center
  line_width = int(max(1, max(img_size)//line_width_scaling_factor))
  font_size = line_width // 2
  move_text_dist = line_width * text_move_scaling_factor
  cv2.line(img, (circle['x'], circle['y']), (img_center_x, img_center_y),
           CV2_RED, line_width)

  # adjust text location
  move_text_right_circle = -1
  move_text_right_image = 2
  if circle['x'] > img_center_x:
    move_text_right_circle = 2
    move_text_right_image = -1

  move_text_down_circle = -1
  move_text_down_image = 4
  if circle['y'] > img_center_y:
    move_text_down_circle = 4
    move_text_down_image = -1

  # add circles to end points and label
  radius_pt = line_width * 2  # makes a dot 2x line width
  filled_pt = -1  # cv2 value for a filled circle
  # circle center
  cv2.circle(img, (circle['x'], circle['y']), radius_pt, CV2_RED, filled_pt)
  text_circle_x = move_text_dist * move_text_right_circle + circle['x']
  text_circle_y = move_text_dist * move_text_down_circle + circle['y']
  cv2.putText(img, 'circle center', (text_circle_x, text_circle_y),
              cv2.FONT_HERSHEY_SIMPLEX, font_size, CV2_RED, line_width)
  # image center
  cv2.circle(img, (img_center_x, img_center_y), radius_pt, CV2_RED, filled_pt)
  text_imgct_x = move_text_dist * move_text_right_image + img_center_x
  text_imgct_y = move_text_dist * move_text_down_image + img_center_y
  cv2.putText(img, 'image center', (text_imgct_x, text_imgct_y),
              cv2.FONT_HERSHEY_SIMPLEX, font_size, CV2_RED, line_width)
  image_processing_utils.write_image(img/255, img_name, True)  # [0, 1] values


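# Example pairing of find_circle() and append_circle_center_to_img(), with
# `img` and `img_name` as in the find_circle() example above: the image is
# annotated with the circle center, image center and connecting line, then
# written to img_name.
#
#   circle = find_circle(img, img_name, min_area=0.01, color=0)
#   append_circle_center_to_img(circle, img, img_name)

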
def get_angle(input_img):
  """Computes angular inclination of chessboard in input_img.

  Args:
    input_img (2D numpy.ndarray): Grayscale image stored as a 2D numpy array.

  Returns:
    Median angle, in degrees, of the squares identified in the image, or None
    if too few squares are found.

  Angle estimation algorithm description:
    Input: 2D grayscale image of chessboard.
    Output: Angle of rotation of chessboard about the axis perpendicular to
            the chessboard. Assumes chessboard and camera are parallel to
            each other.

    1) Use adaptive threshold to make image binary
    2) Find contours
    3) Filter out small contours
    4) Filter out all non-square contours
    5) Compute most common square shape.
        The assumption here is that the most common square instances are the
        chessboard squares. We've shown that with our current tuning, we can
        robustly identify the squares on the sensor fusion chessboard.
    6) Return median angle of most common square shape.

  USAGE NOTE: This function has been tuned to work for the chessboard used in
  the sensor_fusion tests. See images in test_images/rotated_chessboards/ for
  sample captures. If this function is used with other chessboards, it may not
  work as expected.
  """
  # Tuning parameters
  square_area_min = float(input_img.shape[1] * SQUARE_AREA_MIN_REL)

  # Creates copy of image to avoid modifying original.
  img = numpy.array(input_img, copy=True)

  # Scale pixel values from 0-1 to 0-255
  img *= 255
  img = img.astype(numpy.uint8)
  img_thresh = cv2.adaptiveThreshold(
      img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 201, 2)

  # Find all contours.
  contours = find_all_contours(img_thresh)

  # Filter contours to squares only.
  square_contours = []
  for contour in contours:
    rect = cv2.minAreaRect(contour)
    _, (width, height), angle = rect

    # Skip non-squares
    if not numpy.isclose(width, height, rtol=SQUARE_TOL):
      continue

    # Remove very small contours: usually just tiny dots due to noise.
    area = cv2.contourArea(contour)
    if area < square_area_min:
      continue

    square_contours.append(contour)

  areas = []
  for contour in square_contours:
    area = cv2.contourArea(contour)
    areas.append(area)

  median_area = numpy.median(areas)

  filtered_squares = []
  filtered_angles = []
  for square in square_contours:
    area = cv2.contourArea(square)
    if not numpy.isclose(area, median_area, rtol=SQUARE_TOL):
      continue

    filtered_squares.append(square)
    _, (width, height), angle = cv2.minAreaRect(square)
    filtered_angles.append(angle)

  if len(filtered_angles) < ANGLE_NUM_MIN:
    logging.debug(
        'A frame had too few angles to be processed. '
        'Num of angles: %d, MIN: %d', len(filtered_angles), ANGLE_NUM_MIN)
    return None

  return numpy.median(filtered_angles)


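# Sketch of get_angle() usage, mirroring the unit test below: load a rotated
# chessboard capture, convert it to grayscale and take the median square
# angle. `img_path` is a placeholder for one of the
# test_images/rotated_chessboards/ files.
#
#   img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2GRAY)
#   angle = get_angle(img)  # None if too few squares are found

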
class Cv2ImageProcessingUtilsTests(unittest.TestCase):
  """Unit tests for this module."""

  def test_get_angle_identify_rotated_chessboard_angle(self):
    """Unit test to check extracted angles from images."""
    # Image file suffixes and expected angles for the rotated chessboards.
    test_cases = [
        ('', 0),
        ('_15_ccw', -15),
        ('_30_ccw', -30),
        ('_45_ccw', -45),
        ('_60_ccw', -60),
        ('_75_ccw', -75),
    ]
    test_fails = ''

    # For each rotated image pair (normal, wide), check angle against expected.
    for suffix, angle in test_cases:
      # Define image paths.
      normal_img_path = os.path.join(
          TEST_IMG_DIR, f'rotated_chessboards/normal{suffix}.jpg')
      wide_img_path = os.path.join(
          TEST_IMG_DIR, f'rotated_chessboards/wide{suffix}.jpg')

      # Load and color-convert images.
      normal_img = cv2.cvtColor(cv2.imread(normal_img_path), cv2.COLOR_BGR2GRAY)
      wide_img = cv2.cvtColor(cv2.imread(wide_img_path), cv2.COLOR_BGR2GRAY)

      # Assert angle as expected.
      normal = get_angle(normal_img)
      wide = get_angle(wide_img)
      valid_angles = (angle, angle+90)  # try both angle & +90 due to squares
      e_msg = (f'\n Rotation angle test failed: {angle}, extracted normal: '
               f'{normal:.2f}, wide: {wide:.2f}, valid_angles: {valid_angles}')
      matched_angles = False
      for a in valid_angles:
        if (math.isclose(normal, a, abs_tol=ANGLE_CHECK_TOL) and
            math.isclose(wide, a, abs_tol=ANGLE_CHECK_TOL)):
          matched_angles = True

      if not matched_angles:
        test_fails += e_msg

    self.assertEqual(len(test_fails), 0, test_fails)


if __name__ == '__main__':
  unittest.main()