# Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verifies 3 faces with different skin tones are detected."""


import logging
import math
import os.path

import cv2
from mobly import test_runner
from scipy.spatial import distance

import its_base_test
import camera_properties_utils
import capture_request_utils
import image_processing_utils
import its_session_utils
import opencv_processing_utils

_CV2_FACE_SCALE_FACTOR = 1.05  # 5% step for resizing image to find face
_CV2_FACE_MIN_NEIGHBORS = 4  # recommended 3-6: higher for fewer faces
_CV2_GREEN = (0, 1, 0)
_CV2_RED = (1, 0, 0)
_FACE_CENTER_MATCH_TOL_X = 10  # 10 pixels or ~1.5% in 640x480 image
_FACE_CENTER_MATCH_TOL_Y = 20  # 20 pixels or ~4% in 640x480 image
_FACE_CENTER_MIN_LOGGING_DIST = 50
_FD_MODE_OFF, _FD_MODE_SIMPLE, _FD_MODE_FULL = 0, 1, 2
_MIN_NUM_FACES_ALIGNED = 2
_MIN_CENTER_DELTA = 15
_NAME = os.path.splitext(os.path.basename(__file__))[0]
_NUM_FACES = 3
_NUM_TEST_FRAMES = 20
_TEST_REQUIRED_MPC = 34
_W, _H = 640, 480


def eliminate_duplicate_centers(coordinates_list):
  """Checks center coordinates of OpenCV's face rectangles.

  Method makes sure that the list of face rectangles' centers does not
  contain duplicates from the same face.

  Args:
    coordinates_list: list; coordinates of face rectangles' centers
  Returns:
    non_duplicate_list: list; coordinates of face rectangles' centers
      without duplicates on the same face
  """
  output = set()

  for i, xy1 in enumerate(coordinates_list):
    for j, xy2 in enumerate(coordinates_list):
      if distance.euclidean(xy1, xy2) < _MIN_CENTER_DELTA:
        continue
      if xy1 not in output:
        output.add(xy1)
      else:
        output.add(xy2)
  return list(output)
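

# For orientation (hypothetical numbers): an OpenCV detection of
# (x, y, w, h) = (100, 80, 60, 60) has center (100 + 60//2, 80 + 60//2) =
# (130, 110), while a face-detection box cropped to (l, r, t, b) =
# (98, 162, 78, 140) has center ((98 + 162)//2, (78 + 140)//2) = (130, 109).
# The two centers differ by (0, 1) px, well within _FACE_CENTER_MATCH_TOL_X
# and _FACE_CENTER_MATCH_TOL_Y, so match_face_locations() below would count
# the pair as aligned.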


def match_face_locations(faces_cropped, faces_opencv, mode, img, img_name):
  """Assert face locations between two methods.

  Method determines if center of opencv face boxes is within face detection
  face boxes. Using math.hypot to measure the distance between the centers,
  as math.dist is not available for Python versions before 3.8.

  Args:
    faces_cropped: list of lists with (l, r, t, b) for each face.
    faces_opencv: list of lists with (x, y, w, h) for each face.
    mode: int indicating face detection mode.
    img: np image array.
    img_name: text string with path to image file.
  """
  # turn faces_opencv into list of center locations
  faces_opencv_center = [(x+w//2, y+h//2) for (x, y, w, h) in faces_opencv]
  cropped_faces_centers = [
      ((l+r)//2, (t+b)//2) for (l, r, t, b) in faces_cropped]
  faces_opencv_center.sort(key=lambda t: [t[1], t[0]])
  cropped_faces_centers.sort(key=lambda t: [t[1], t[0]])
  logging.debug('cropped face centers: %s', str(cropped_faces_centers))
  logging.debug('opencv face center: %s', str(faces_opencv_center))
  faces_opencv_centers = []
  num_centers_aligned = 0

  # eliminate duplicate openCV face rectangles' centers on the same face
  faces_opencv_centers = eliminate_duplicate_centers(faces_opencv_center)
  logging.debug('opencv face centers: %s', str(faces_opencv_centers))

  for (x, y) in faces_opencv_centers:
    for (x1, y1) in cropped_faces_centers:
      centers_dist = math.hypot(x-x1, y-y1)
      if centers_dist < _FACE_CENTER_MIN_LOGGING_DIST:
        logging.debug('centers_dist: %.3f', centers_dist)
      if (abs(x-x1) < _FACE_CENTER_MATCH_TOL_X and
          abs(y-y1) < _FACE_CENTER_MATCH_TOL_Y):
        num_centers_aligned += 1

  # If the test fails, save image with green AND OpenCV red rectangles
  image_processing_utils.write_image(img, img_name)
  if num_centers_aligned < _MIN_NUM_FACES_ALIGNED:
    for (x, y, w, h) in faces_opencv:
      cv2.rectangle(img, (x, y), (x+w, y+h), _CV2_RED, 2)
    image_processing_utils.write_image(img, img_name)
    logging.debug('centered: %s', str(num_centers_aligned))
    raise AssertionError(f'Mode {mode} face rectangles in wrong location(s)! '
                         f'Found {num_centers_aligned} rectangles near cropped '
                         f'face centers, expected {_MIN_NUM_FACES_ALIGNED}')
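

# The two helpers that follow validate the HAL face metadata directly: face
# bounds are expressed in active-array pixel coordinates (so the limits are
# the active array width/height, not the 640x480 capture size), and landmark
# positions are only validated in FD_MODE_FULL; in FD_MODE_SIMPLE the test
# instead asserts that no eye/mouth landmarks are reported and that the face
# id is -1.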


def check_face_bounding_box(rect, aw, ah, index):
  """Checks face bounding box is within the active array area.

  Args:
    rect: dict; with face bounding box information
    aw: int; active array width
    ah: int; active array height
    index: int to designate face number
  """
  logging.debug('Checking bounding box in face %d: %s', index, str(rect))
  if (rect['top'] >= rect['bottom'] or
      rect['left'] >= rect['right']):
    raise AssertionError('Face coordinates incorrect! '
                         f" t: {rect['top']}, b: {rect['bottom']}, "
                         f" l: {rect['left']}, r: {rect['right']}")
  if (not 0 <= rect['top'] <= ah or
      not 0 <= rect['bottom'] <= ah):
    raise AssertionError('Face top/bottom outside of image height! '
                         f"t: {rect['top']}, b: {rect['bottom']}, "
                         f"h: {ah}")
  if (not 0 <= rect['left'] <= aw or
      not 0 <= rect['right'] <= aw):
    raise AssertionError('Face left/right outside of image width! '
                         f"l: {rect['left']}, r: {rect['right']}, "
                         f" w: {aw}")


def check_face_landmarks(face, fd_mode, index):
  """Checks face landmarks fall within face bounding box.

  Face ID should be -1 for SIMPLE and unique for FULL

  Args:
    face: dict from face detection algorithm
    fd_mode: int of face detection mode
    index: int to designate face number
  """
  logging.debug('Checking landmarks in face %d: %s', index, str(face))
  if fd_mode == _FD_MODE_SIMPLE:
    if 'leftEye' in face or 'rightEye' in face:
      raise AssertionError('Eyes not supported in FD_MODE_SIMPLE.')
    if 'mouth' in face:
      raise AssertionError('Mouth not supported in FD_MODE_SIMPLE.')
    if face['id'] != -1:
      raise AssertionError('face_id should be -1 in FD_MODE_SIMPLE.')
  elif fd_mode == _FD_MODE_FULL:
    l, r = face['bounds']['left'], face['bounds']['right']
    t, b = face['bounds']['top'], face['bounds']['bottom']
    l_eye_x, l_eye_y = face['leftEye']['x'], face['leftEye']['y']
    r_eye_x, r_eye_y = face['rightEye']['x'], face['rightEye']['y']
    mouth_x, mouth_y = face['mouth']['x'], face['mouth']['y']
    if not l <= l_eye_x <= r:
      raise AssertionError(f'Face l: {l}, r: {r}, left eye x: {l_eye_x}')
    if not t <= l_eye_y <= b:
      raise AssertionError(f'Face t: {t}, b: {b}, left eye y: {l_eye_y}')
    if not l <= r_eye_x <= r:
      raise AssertionError(f'Face l: {l}, r: {r}, right eye x: {r_eye_x}')
    if not t <= r_eye_y <= b:
      raise AssertionError(f'Face t: {t}, b: {b}, right eye y: {r_eye_y}')
    if not l <= mouth_x <= r:
      raise AssertionError(f'Face l: {l}, r: {r}, mouth x: {mouth_x}')
    if not t <= mouth_y <= b:
      raise AssertionError(f'Face t: {t}, b: {b}, mouth y: {mouth_y}')
  else:
    raise AssertionError(f'Unknown face detection mode: {fd_mode}.')


def correct_faces_for_crop(faces, img, crop):
  """Correct face rectangles for sensor crop.

  Args:
    faces: list of dicts with face information
    img: np image array
    crop: dict of crop region size with 'top, right, left, bottom' as keys
  Returns:
    list of face locations (left, right, top, bottom) corrected
  """
  faces_corrected = []
  cw, ch = crop['right'] - crop['left'], crop['bottom'] - crop['top']
  logging.debug('crop region: %s', str(crop))
  w = img.shape[1]
  h = img.shape[0]
  for rect in [face['bounds'] for face in faces]:
    logging.debug('rect: %s', str(rect))
    left = int(round((rect['left'] - crop['left']) * w / cw))
    right = int(round((rect['right'] - crop['left']) * w / cw))
    top = int(round((rect['top'] - crop['top']) * h / ch))
    bottom = int(round((rect['bottom'] - crop['top']) * h / ch))
    faces_corrected.append([left, right, top, bottom])
  logging.debug('faces_corrected: %s', str(faces_corrected))
  return faces_corrected
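

# Worked example for correct_faces_for_crop() (hypothetical values): with the
# 640x480 output image and a zoomed crop region of
# {'left': 1000, 'top': 750, 'right': 3000, 'bottom': 2250} (cw=2000,
# ch=1500), a face bound of left=1500, top=1200 in active-array coordinates
# maps to ((1500 - 1000) * 640 / 2000, (1200 - 750) * 480 / 1500) = (160, 144)
# pixels in the output image.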


class NumFacesTest(its_base_test.ItsBaseTest):
  """Test face detection with different skin tones."""

  def test_num_faces(self):
    """Test face detection."""
    with its_session_utils.ItsSession(
        device_id=self.dut.serial,
        camera_id=self.camera_id,
        hidden_physical_id=self.hidden_physical_id) as cam:
      props = cam.get_camera_properties()
      props = cam.override_with_hidden_physical_camera_props(props)

      # Load chart for scene
      its_session_utils.load_scene(
          cam, props, self.scene, self.tablet, self.chart_distance,
          log_path=self.log_path)

      # Check media performance class
      should_run = camera_properties_utils.face_detect(props)
      media_performance_class = its_session_utils.get_media_performance_class(
          self.dut.serial)
      if media_performance_class >= _TEST_REQUIRED_MPC and not should_run:
        its_session_utils.raise_mpc_assertion_error(
            _TEST_REQUIRED_MPC, _NAME, media_performance_class)

      # Check skip conditions
      camera_properties_utils.skip_unless(should_run)
      mono_camera = camera_properties_utils.mono_camera(props)
      fd_modes = props['android.statistics.info.availableFaceDetectModes']
      a = props['android.sensor.info.activeArraySize']
      aw, ah = a['right'] - a['left'], a['bottom'] - a['top']
      logging.debug('active array size: %s', str(a))
      file_name_stem = os.path.join(self.log_path, _NAME)

      cam.do_3a(mono_camera=mono_camera)

      for fd_mode in fd_modes:
        logging.debug('face detection mode: %d', fd_mode)
        if not _FD_MODE_OFF <= fd_mode <= _FD_MODE_FULL:
          raise AssertionError(f'FD mode {fd_mode} not in MODES! '
                               f'OFF: {_FD_MODE_OFF}, FULL: {_FD_MODE_FULL}')
        req = capture_request_utils.auto_capture_request()
        req['android.statistics.faceDetectMode'] = fd_mode
        fmt = {'format': 'yuv', 'width': _W, 'height': _H}
        caps = cam.do_capture([req]*_NUM_TEST_FRAMES, fmt)
        for i, cap in enumerate(caps):
          fd_mode_cap = cap['metadata']['android.statistics.faceDetectMode']
          if fd_mode_cap != fd_mode:
            raise AssertionError(f'metadata {fd_mode_cap} != req {fd_mode}')

          faces = cap['metadata']['android.statistics.faces']
          # 0 faces should be returned for OFF mode
          if fd_mode == _FD_MODE_OFF:
            if faces:
              raise AssertionError(f'Error: faces detected in OFF: {faces}')
            continue
          # Face detection could take several frames to warm up,
          # but should detect the correct number of faces in last frame
          if i == _NUM_TEST_FRAMES - 1:
            img = image_processing_utils.convert_capture_to_rgb_image(
                cap, props=props)
            fnd_faces = len(faces)
            logging.debug('Found %d face(s), expected %d.',
                          fnd_faces, _NUM_FACES)

            # draw boxes around faces in green
            crop_region = cap['metadata']['android.scaler.cropRegion']
            faces_cropped = correct_faces_for_crop(faces, img, crop_region)
            for (l, r, t, b) in faces_cropped:
              cv2.rectangle(img, (l, t), (r, b), _CV2_GREEN, 2)

            # Save image with green rectangles
            img_name = f'{file_name_stem}_fd_mode_{fd_mode}.jpg'
            image_processing_utils.write_image(img, img_name)
            if fnd_faces != _NUM_FACES:
              raise AssertionError('Wrong num of faces found! Found: '
                                   f'{fnd_faces}, expected: {_NUM_FACES}')
            # Reasonable scores for faces
            face_scores = [face['score'] for face in faces]
            for score in face_scores:
              if not 1 <= score <= 100:
                raise AssertionError(f'score not between [1:100]! {score}')
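
            # The remaining checks below also run only on this last frame:
            # face bounds are validated against the active array, landmarks
            # (on vendor API level Android 14 or higher) against the face
            # bounds, and for scene2_d the face-detection centers are
            # cross-checked against OpenCV's detections on the same image.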

            # Face bounds should be within active array
            face_rectangles = [face['bounds'] for face in faces]
            for j, rect in enumerate(face_rectangles):
              check_face_bounding_box(rect, aw, ah, j)

            # Face landmarks (if provided) are within face bounding box
            vendor_api_level = its_session_utils.get_vendor_api_level(
                self.dut.serial)
            if vendor_api_level >= its_session_utils.ANDROID14_API_LEVEL:
              for k, face in enumerate(faces):
                check_face_landmarks(face, fd_mode, k)

            # Match location of opencv and face detection mode faces
            if self.scene == 'scene2_d':
              logging.debug('Match face centers between opencv & faces')
              faces_opencv = opencv_processing_utils.find_opencv_faces(
                  img, _CV2_FACE_SCALE_FACTOR, _CV2_FACE_MIN_NEIGHBORS)
              if fd_mode:  # non-zero value for ON
                match_face_locations(faces_cropped, faces_opencv,
                                     fd_mode, img, img_name)

          if not faces:
            continue
          logging.debug('Frame %d face metadata:', i)
          logging.debug('  Faces: %s', str(faces))


if __name__ == '__main__':
  test_runner.main()
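

# NB: the self.scene check above suggests this module is shared across the
# scene2_* variants, with the OpenCV cross-check enabled only for scene2_d.
# Like other CameraITS tests it is normally launched through the ITS harness
# (e.g. tools/run_all_tests.py) rather than run directly, since it relies on
# the DUT/tablet session that its_base_test.ItsBaseTest sets up.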