# Copyright 2024 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verifies 3 faces with different skin tones are detected in preview."""


import logging
import os.path

import cv2
from mobly import test_runner

import its_base_test
import camera_properties_utils
import image_processing_utils
import its_session_utils
import opencv_processing_utils
import preview_processing_utils
import video_processing_utils

_CV2_GREEN = (0, 255, 0)
_CV2_LINE_THICKNESS = 3
_CV2_RADIUS = 10
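# Values of android.statistics.faceDetectMode: OFF, SIMPLE, FULL.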
_FD_MODE_OFF, _FD_MODE_SIMPLE, _FD_MODE_FULL = 0, 1, 2
_FRAME_INDEX = -1  # last frame
_NAME = os.path.splitext(os.path.basename(__file__))[0]
_PREVIEW_FACES_MIN_NUM = 3
_PREVIEW_RECORDING_DURATION_SECONDS = 3
_RGB_FULL_CHANNEL = 255
_VALID_FD_MODES = {_FD_MODE_OFF, _FD_MODE_SIMPLE, _FD_MODE_FULL}


def _check_face_landmarks(
    crop, face, fd_mode, index, preview_img, preview_size):
  """Checks face landmarks fall within the face bounding box.

  Face ID should be -1 for SIMPLE mode and unique for FULL mode.

  Args:
    crop: dict; crop region with 'top', 'right', 'left', 'bottom' keys
        describing the region of the sensor that is read out.
    face: dict; face data from the face detection algorithm.
    fd_mode: int; face detection mode.
    index: int; index of the face being checked.
    preview_img: numpy array; preview image.
    preview_size: str; preview size used for recording. Ex: 1920x1080.
  """
  logging.debug('Checking landmarks in face %d: %s', index, str(face))
  if fd_mode == _FD_MODE_SIMPLE:
    if 'leftEye' in face or 'rightEye' in face:
      raise AssertionError('Eyes not supported in FD_MODE_SIMPLE.')
    if 'mouth' in face:
      raise AssertionError('Mouth not supported in FD_MODE_SIMPLE.')
    if face['id'] != -1:
      raise AssertionError('face_id should be -1 in FD_MODE_SIMPLE.')
  elif fd_mode == _FD_MODE_FULL:
    l, r = face['bounds']['left'], face['bounds']['right']
    t, b = face['bounds']['top'], face['bounds']['bottom']
    l_eye_x, l_eye_y = face['leftEye']['x'], face['leftEye']['y']
    r_eye_x, r_eye_y = face['rightEye']['x'], face['rightEye']['y']
    mouth_x, mouth_y = face['mouth']['x'], face['mouth']['y']
    _draw_facial_features(crop, l_eye_x, l_eye_y, mouth_x, mouth_y,
                          preview_img, preview_size, r_eye_x, r_eye_y)
    if not l <= l_eye_x <= r:
      raise AssertionError(f'Face l: {l}, r: {r}, left eye x: {l_eye_x}')
    if not t <= l_eye_y <= b:
      raise AssertionError(f'Face t: {t}, b: {b}, left eye y: {l_eye_y}')
    if not l <= r_eye_x <= r:
      raise AssertionError(f'Face l: {l}, r: {r}, right eye x: {r_eye_x}')
    if not t <= r_eye_y <= b:
      raise AssertionError(f'Face t: {t}, b: {b}, right eye y: {r_eye_y}')
    if not l <= mouth_x <= r:
      raise AssertionError(f'Face l: {l}, r: {r}, mouth x: {mouth_x}')
    if not t <= mouth_y <= b:
      raise AssertionError(f'Face t: {t}, b: {b}, mouth y: {mouth_y}')
  else:
    raise AssertionError(f'Unknown face detection mode: {fd_mode}.')


def _do_preview_recording_and_retrieve_result(
    dut, cam, preview_size, fd_mode, log_path):
  """Issue a preview request and read back the preview recording object.

  Args:
    dut: obj; Android controller device object.
    cam: obj; camera object.
    preview_size: str; Preview resolution at which to record. Ex. "1920x1080".
    fd_mode: int; STATISTICS_FACE_DETECT_MODE. Set if not None.
    log_path: str; Log path to save preview recording.

  Returns:
    result: obj; Recording object.
  """
  # Record preview video with face detection.
  result = cam.do_preview_recording(
      preview_size, _PREVIEW_RECORDING_DURATION_SECONDS, stabilize=False,
      zoom_ratio=None, face_detect_mode=fd_mode)
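  # Pull the recorded preview from the device into the host log directory.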
  dut.adb.pull(
      [result['recordedOutputPath'], log_path])
  logging.debug('Preview recording with face detection is completed.')

  return result


def _draw_facial_features(crop, l_eye_x, l_eye_y, mouth_x, mouth_y,
                          preview_img, preview_size, r_eye_x, r_eye_y):
  """Mark facial features with green circles.

  Args:
    crop: dict; crop region with 'top', 'right', 'left', 'bottom' keys
        describing the region of the sensor that is read out.
    l_eye_x: int; x-coordinate of the center of the left eye.
    l_eye_y: int; y-coordinate of the center of the left eye.
    mouth_x: int; x-coordinate of the center of the mouth.
    mouth_y: int; y-coordinate of the center of the mouth.
    preview_img: numpy array; preview image.
    preview_size: str; preview size used for recording. Ex: 1920x1080.
    r_eye_x: int; x-coordinate of the center of the right eye.
    r_eye_y: int; y-coordinate of the center of the right eye.
  """
  # Find out the size of active arrays and image.
  aa_width = crop['right'] - crop['left']
  aa_height = crop['bottom'] - crop['top']
  img_width = int(preview_size.split('x')[0])
  img_height = int(preview_size.split('x')[1])
  # Convert sensor coordinates to image coordinates.
  l_eye = image_processing_utils.convert_sensor_coords_to_image_coords(
      aa_width, aa_height, (l_eye_x, l_eye_y), img_width, img_height)
  r_eye = image_processing_utils.convert_sensor_coords_to_image_coords(
      aa_width, aa_height, (r_eye_x, r_eye_y), img_width, img_height)
  mouth = image_processing_utils.convert_sensor_coords_to_image_coords(
      aa_width, aa_height, (mouth_x, mouth_y), img_width, img_height)
  # Draw circles at the center of facial features.
  cv2.circle(
      preview_img, (int(l_eye[0]), int(l_eye[1])), _CV2_RADIUS,
      _CV2_GREEN, _CV2_LINE_THICKNESS)
  cv2.circle(
      preview_img, (int(r_eye[0]), int(r_eye[1])), _CV2_RADIUS,
      _CV2_GREEN, _CV2_LINE_THICKNESS)
  cv2.circle(
      preview_img, (int(mouth[0]), int(mouth[1])), _CV2_RADIUS,
      _CV2_GREEN, _CV2_LINE_THICKNESS)


def _draw_face_rectangles(result, faces, preview_img):
  """Draw green boxes around detected faces in the preview image.

  Args:
    result: obj; Recording object returned from ItsService.
    faces: list; List of dicts with face information.
    preview_img: numpy array; preview image.
  """
  # Draw boxes around faces in green.
  crop_region = result['captureMetadata'][_FRAME_INDEX][
      'android.scaler.cropRegion']
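  # Correct face rectangles for the crop region; each entry is (l, r, t, b)
  # in preview image coordinates.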
  faces_cropped = (
      opencv_processing_utils.correct_faces_for_crop(
          faces, preview_img, crop_region)
      )
  for (l, r, t, b) in faces_cropped:
    cv2.rectangle(
        preview_img, (l, t), (r, b), _CV2_GREEN, _CV2_LINE_THICKNESS)


class PreviewNumFacesTest(its_base_test.ItsBaseTest):
  """Test face detection with different skin tones in preview."""

  def test_preview_num_faces(self):
    """Test face detection."""
    log_path = self.log_path
    with its_session_utils.ItsSession(
        device_id=self.dut.serial,
        camera_id=self.camera_id,
        hidden_physical_id=self.hidden_physical_id) as cam:
      props = cam.get_camera_properties()
      props = cam.override_with_hidden_physical_camera_props(props)

      # Load chart for scene.
      its_session_utils.load_scene(
          cam, props, self.scene, self.tablet, self.chart_distance,
          log_path=log_path)

      # Check skip conditions.
      should_run = (camera_properties_utils.face_detect(props) and
                    (its_session_utils.get_first_api_level(self.dut.serial) >=
                     its_session_utils.ANDROID16_API_LEVEL))
      camera_properties_utils.skip_unless(should_run)
      mono_camera = camera_properties_utils.mono_camera(props)
      fd_modes = props['android.statistics.info.availableFaceDetectModes']

      cam.do_3a(mono_camera=mono_camera)
      for fd_mode in fd_modes:
        logging.debug('Face detection mode: %d', fd_mode)
        if fd_mode not in _VALID_FD_MODES:
          raise AssertionError(f'FD mode {fd_mode} not in MODES! '
                               f'MODES: {_VALID_FD_MODES}')

        # Find largest preview size string and set as recording size.
        preview_size = preview_processing_utils.get_max_preview_test_size(
            cam, self.camera_id)
        logging.debug('Preview size used for recording: %s', preview_size)

        # Issue a preview request and read back the preview recording object.
        result = _do_preview_recording_and_retrieve_result(
            self.dut, cam, preview_size, fd_mode, log_path)
        preview_file_name = (
            result['recordedOutputPath'].split('/')[-1])
        logging.debug('Recorded preview name: %s', preview_file_name)

        # Get the last key frame from the preview recording.
        preview_img = (
            video_processing_utils.extract_last_key_frame_from_recording(
                log_path, preview_file_name))

        # Check face detect mode is correctly set.
        fd_mode_cap = (
            result['captureMetadata'][_FRAME_INDEX][
                'android.statistics.faceDetectMode'])
        if fd_mode_cap != fd_mode:
          raise AssertionError(f'metadata {fd_mode_cap} != req {fd_mode}')

        # No faces should be returned in OFF mode; skip remaining checks
        # for _FD_MODE_OFF.
        faces = result['captureMetadata'][_FRAME_INDEX][
            'android.statistics.faces']
        if fd_mode == _FD_MODE_OFF:
          if faces:
            raise AssertionError(f'Error: faces detected in OFF: {faces}')
          continue

        # If front camera, flip preview image to match camera capture.
        file_name_stem = os.path.join(log_path, _NAME)
        if (props['android.lens.facing'] ==
            camera_properties_utils.LENS_FACING['FRONT']):
          preview_img = (
              image_processing_utils.mirror_preview_image_by_sensor_orientation(
                  props['android.sensor.orientation'], preview_img))
        else:
          file_name_stem = os.path.join(log_path, 'rear_preview')

        # Draw boxes around faces in green.
        _draw_face_rectangles(result, faces, preview_img)

        # Check face landmarks (if provided) are within the face bounding box.
        crop = result['captureMetadata'][_FRAME_INDEX][
            'android.scaler.cropRegion']
        for i, face in enumerate(faces):
          _check_face_landmarks(
              crop, face, fd_mode, i, preview_img, preview_size)

        # Save image with green rectangles.
        img_name = f'{file_name_stem}_fd_mode_{fd_mode}.jpg'
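        # Scale pixel values to [0, 1]; write_image expects float images in
        # that range.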
        image_processing_utils.write_image(
            preview_img / _RGB_FULL_CHANNEL, img_name)

        # Check that the expected number of faces was detected.
        num_faces = len(faces)
        if num_faces != _PREVIEW_FACES_MIN_NUM:
          raise AssertionError(f'Face detection in preview found {num_faces}'
                               f' faces, but expected {_PREVIEW_FACES_MIN_NUM}')
        logging.debug('Face detection in preview found %d faces', num_faces)


if __name__ == '__main__':
  test_runner.main()