# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import errno
import hashlib
import logging
import math
import mmap
import os
import re

from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import file_utils
from autotest_lib.client.cros import chrome_binary_test
from autotest_lib.client.cros.video import device_capability
from autotest_lib.client.cros.video import helper_logger


DOWNLOAD_BASE = ('http://commondatastorage.googleapis.com'
                 '/chromiumos-test-assets-public/')

VEA_BINARY = 'video_encode_accelerator_unittest'
TIME_BINARY = '/usr/local/bin/time'

# The format used for 'time': <real time> <kernel time> <user time>
TIME_OUTPUT_FORMAT = '%e %S %U'
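# Example time.log content in this format (illustrative values):
# "25.04 1.32 10.51" means 25.04s real, 1.32s kernel, 10.51s user time.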

FRAME_STATS_SUFFIX = 'frame-data.csv'
TEST_LOG_SUFFIX = 'test.log'
TIME_LOG_SUFFIX = 'time.log'

# Performance keys:
# FPS (i.e. encoder throughput)
KEY_FPS = 'fps'
# Encode latencies at the 50th, 75th, and 95th percentiles.
# Encode latency is the delay from input of a frame to output of the encoded
# bitstream.
KEY_ENCODE_LATENCY_50 = 'encode_latency.50_percentile'
KEY_ENCODE_LATENCY_75 = 'encode_latency.75_percentile'
KEY_ENCODE_LATENCY_95 = 'encode_latency.95_percentile'
# CPU usage in kernel space
KEY_CPU_KERNEL_USAGE = 'cpu_usage.kernel'
# CPU usage in user space
KEY_CPU_USER_USAGE = 'cpu_usage.user'

# Units of performance values:
UNIT_MILLISECOND = 'milliseconds'
UNIT_MICROSECOND = 'us'
UNIT_RATIO = 'ratio'
UNIT_FPS = 'fps'

RE_FPS = re.compile(r'^Measured encoder FPS: ([+\-]?[0-9.]+)$', re.MULTILINE)
RE_ENCODE_LATENCY_50 = re.compile(
    r'^Encode latency for the 50th percentile: (\d+) us$',
    re.MULTILINE)
RE_ENCODE_LATENCY_75 = re.compile(
    r'^Encode latency for the 75th percentile: (\d+) us$',
    re.MULTILINE)
RE_ENCODE_LATENCY_95 = re.compile(
    r'^Encode latency for the 95th percentile: (\d+) us$',
    re.MULTILINE)
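# Sample vea_unittest log lines these patterns match (values illustrative):
#   Measured encoder FPS: 29.97
#   Encode latency for the 50th percentile: 8123 us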


def _remove_if_exists(filepath):
    try:
        os.remove(filepath)
    except OSError as e:
        if e.errno != errno.ENOENT:  # no such file
            raise


class video_VEAPerf(chrome_binary_test.ChromeBinaryTest):
    """
    This test monitors several performance metrics reported by the Chrome test
    binary, video_encode_accelerator_unittest.
    """

    version = 1

    def _logperf(self, test_name, key, value, units, higher_is_better=False):
        """
        Reports one performance value under the key '<test_name>.<key>'.
        """
        description = '%s.%s' % (test_name, key)
        self.output_perf_value(
                description=description, value=value, units=units,
                higher_is_better=higher_is_better)


    def _analyze_fps(self, test_name, log_file):
        """
        Analyzes FPS info from the result log file.

        @param test_name: Name of the test case.
        @param log_file: Path to the log produced by the test binary.
        """
        with open(log_file, 'r') as f:
            mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            fps = [float(m.group(1)) for m in RE_FPS.finditer(mm)]
            mm.close()
        if len(fps) != 1:
            raise error.TestError('Parsing FPS failed with %d occurrence(s).' %
                                  len(fps))
        self._logperf(test_name, KEY_FPS, fps[0], UNIT_FPS, True)


    def _analyze_encode_latency(self, test_name, log_file):
        """
        Analyzes encode latency from the result log file.

        @param test_name: Name of the test case.
        @param log_file: Path to the log produced by the test binary.
        """
        with open(log_file, 'r') as f:
            mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            latency_50 = [int(m.group(1)) for m in
                          RE_ENCODE_LATENCY_50.finditer(mm)]
            latency_75 = [int(m.group(1)) for m in
                          RE_ENCODE_LATENCY_75.finditer(mm)]
            latency_95 = [int(m.group(1)) for m in
                          RE_ENCODE_LATENCY_95.finditer(mm)]
            mm.close()
        if any(len(l) != 1 for l in [latency_50, latency_75, latency_95]):
            raise error.TestError('Parsing encode latency failed.')
        self._logperf(test_name, KEY_ENCODE_LATENCY_50, latency_50[0],
                      UNIT_MICROSECOND)
        self._logperf(test_name, KEY_ENCODE_LATENCY_75, latency_75[0],
                      UNIT_MICROSECOND)
        self._logperf(test_name, KEY_ENCODE_LATENCY_95, latency_95[0],
                      UNIT_MICROSECOND)


    def _analyze_cpu_usage(self, test_name, time_log_file):
        """
        Analyzes CPU usage from the output of the 'time' command.

        @param test_name: Name of the test case.
        @param time_log_file: Path to the log written by 'time'.
        """
        with open(time_log_file) as f:
            content = f.read()
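        # Per TIME_OUTPUT_FORMAT the fields are real, kernel, and user time in
        # seconds. The resulting ratios can exceed 1.0 on multi-core machines,
        # since CPU time is summed across cores.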
        r, s, u = (float(x) for x in content.split())
        self._logperf(test_name, KEY_CPU_USER_USAGE, u / r, UNIT_RATIO)
        self._logperf(test_name, KEY_CPU_KERNEL_USAGE, s / r, UNIT_RATIO)


    def _analyze_frame_stats(self, test_name, frame_stats_file):
        """
        Analyzes encode quality from the --frame_stats output CSV. Assumes
        YUV420 subsampling when computing MSE samples per channel.

        @param test_name: Name of the test case.
        @param frame_stats_file: Path to the per-frame stats CSV.
        """
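        # The CSV has one row per frame with the columns (see the unpacking in
        # the loop below): frame, width, height, ssim_y, ssim_u, ssim_v,
        # mse_y, mse_u, mse_v.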
        def mse_to_psnr(samples, peak, mse):
            """
            Computes PSNR from a summed MSE value for one frame.

            @param samples: Number of samples the MSE was summed over.
            @param peak: Peak sample value (255 for 8-bit content).
            @param mse: Sum of squared errors over all samples.
            """
            MAX_PSNR = 100.0
            # Prevent a divide-by-zero; an MSE of 0 is perfect quality (no
            # error), so cap the score at MAX_PSNR.
            if mse == 0:
                return MAX_PSNR
            psnr = 10.0 * math.log10(peak * peak * samples / float(mse))
            return min(psnr, MAX_PSNR)
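        # Worked example (illustrative values): a 1280x720 Y plane has 921600
        # samples; a summed MSE of 9216000 gives
        # 10 * log10(255^2 * 921600 / 9216000) ~= 38.1 dB.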

        frame_ssim = {'y': [], 'u': [], 'v': [], 'combined': []}
        frame_psnr = {'y': [], 'u': [], 'v': [], 'combined': []}
        for line in open(frame_stats_file):
            (frame, width, height,
                ssim_y, ssim_u, ssim_v, mse_y, mse_u, mse_v) = line.split(',')
            # Skip the CSV header row.
            if frame == 'frame':
                continue
            frame = int(frame)
            width = int(width)
            height = int(height)
            ssim_y = float(ssim_y)
            ssim_u = float(ssim_u)
            ssim_v = float(ssim_v)
            mse_y = int(mse_y)
            mse_u = int(mse_u)
            mse_v = int(mse_v)

            frame_ssim['y'].append(ssim_y)
            frame_ssim['u'].append(ssim_u)
            frame_ssim['v'].append(ssim_v)
            # Weighting of YUV channels for SSIM taken from libvpx.
            frame_ssim['combined'].append(
                0.8 * ssim_y + 0.1 * (ssim_u + ssim_v))

            # Samples per MSE score assume YUV420 subsampling.
            frame_psnr['y'].append(
                mse_to_psnr(width * height * 4 / 4, 255, mse_y))
            frame_psnr['u'].append(
                mse_to_psnr(width * height * 1 / 4, 255, mse_u))
            frame_psnr['v'].append(
                mse_to_psnr(width * height * 1 / 4, 255, mse_v))
            frame_psnr['combined'].append(
                mse_to_psnr(
                    width * height * 6 / 4, 255, mse_y + mse_u + mse_v))
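            # Note: under YUV420, Y has width*height samples while U and V
            # each have a quarter of that, so the combined PSNR uses
            # 6/4 * width * height samples over the summed MSE.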

        for channel in ['y', 'u', 'v', 'combined']:
            # Log stats with a key similar to 'quality.ssim.y.max'. For
            # combined stats the channel is omitted ('quality.ssim.max').
            key = 'quality.%s'
            if channel != 'combined':
                key += '.' + channel
            key += '.%s'
            for (stat, func) in [('min', min), ('max', max),
                                 ('avg', lambda x: sum(x) / len(x))]:
                self._logperf(test_name, key % ('ssim', stat),
                              func(frame_ssim[channel]), None,
                              higher_is_better=True)
                self._logperf(test_name, key % ('psnr', stat),
                              func(frame_psnr[channel]), None,
                              higher_is_better=True)


    def _get_profile_name(self, profile):
        """
        Gets the profile name for a profile index.

        @param profile: Profile index passed to the test binary.
        @returns the profile name ('h264' or 'vp8').
        """
        if profile == 1:
            return 'h264'
        elif profile == 11:
            return 'vp8'
        else:
            raise error.TestError('Unknown profile index: %d.' % profile)


    def _convert_test_name(self, path, on_cloud, profile):
        """Converts a source path to a test name and output video file name.

        For example, for the cloud path
            "tulip2/tulip2-1280x720-1b95123232922fe0067869c74e19cd09.yuv"

        we derive the test name "tulip2-1280x720_vp8" (or "_h264") and the
        output file name "tulip2-1280x720.vp8" (or ".h264"), depending on the
        profile; the MD5 checksum in the path is stripped.

        For a local file, the base name is used directly.

        @param path: The local path or download path.
        @param on_cloud: Whether the file is on cloud.
        @param profile: Profile index.

        @returns a pair of (test name, output video file name).
        """
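        # Illustrative call: _convert_test_name(
        #     'tulip2/tulip2-1280x720-1b95123232922fe0067869c74e19cd09.yuv',
        #     on_cloud=True, profile=11)
        # returns ('tulip2-1280x720_vp8', 'tulip2-1280x720.vp8').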
        s = os.path.basename(path)
        # For cloud files, strip everything from the final '-' (the MD5
        # checksum); for local files, strip the extension.
        name = s[:s.rfind('-' if on_cloud else '.')]
        profile_name = self._get_profile_name(profile)
        return (name + '_' + profile_name, name + '.' + profile_name)


    def _download_video(self, path_on_cloud, local_file):
        url = '%s%s' % (DOWNLOAD_BASE, path_on_cloud)
        logging.info('Downloading "%s" to "%s".', url, local_file)

        file_utils.download_file(url, local_file)

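        # The expected MD5 digest is embedded in the cloud file name (see
        # _convert_test_name), so verify the downloaded bytes against it.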
        with open(local_file, 'rb') as r:
            md5sum = hashlib.md5(r.read()).hexdigest()
            if md5sum not in path_on_cloud:
                raise error.TestError('Mismatched MD5 sum: %s' % md5sum)


    def _get_result_filename(self, test_name, subtype, suffix):
        return os.path.join(self.resultsdir,
                            '%s_%s_%s' % (test_name, subtype, suffix))


    def _get_vea_unittest_args(self, test_stream_data, test_log_file):
        vea_args = [
            '--test_stream_data=%s' % test_stream_data,
            '--output_log="%s"' % test_log_file,
            '--ozone-platform=gbm',
            helper_logger.chrome_vmodule_flag()]
        return vea_args


    def _run_test_case(self, test_name, test_stream_data):
        """
        Runs the VEA unit test binary for one test case.

        @param test_name: Name of this test case.
        @param test_stream_data: Value of --test_stream_data in vea_unittest.
        """
        # Run at full speed to measure FPS.
        test_log_file = self._get_result_filename(test_name, 'fullspeed',
                                                  TEST_LOG_SUFFIX)
        vea_args = self._get_vea_unittest_args(test_stream_data, test_log_file)
        vea_args += ['--gtest_filter=EncoderPerf/*/0']
        self.run_chrome_test_binary(VEA_BINARY, ' '.join(vea_args))
        self._analyze_fps(test_name, test_log_file)

        # Measure CPU usage and encode latency at the specified frame rate.
        test_log_file = self._get_result_filename(test_name, 'fixedspeed',
                                                  TEST_LOG_SUFFIX)
        time_log_file = self._get_result_filename(test_name, 'fixedspeed',
                                                  TIME_LOG_SUFFIX)
        vea_args = self._get_vea_unittest_args(test_stream_data, test_log_file)
        vea_args += ['--gtest_filter=SimpleEncode/*/0',
                     '--run_at_fps',
                     '--measure_latency']
        time_cmd = ('%s -f "%s" -o "%s" ' %
                    (TIME_BINARY, TIME_OUTPUT_FORMAT, time_log_file))
        self.run_chrome_test_binary(VEA_BINARY, ' '.join(vea_args),
                                    prefix=time_cmd)
        self._analyze_encode_latency(test_name, test_log_file)
        self._analyze_cpu_usage(test_name, time_log_file)

        # TODO(pbos): Measure quality at more bitrates.
        # Generate SSIM/PSNR scores (objective quality metrics).
        test_log_file = self._get_result_filename(test_name, 'quality',
                                                  TEST_LOG_SUFFIX)
        frame_stats_file = self._get_result_filename(test_name, 'quality',
                                                     FRAME_STATS_SUFFIX)
        vea_args = self._get_vea_unittest_args(test_stream_data, test_log_file)
        vea_args += ['--gtest_filter=SimpleEncode/*/0',
                     '--frame_stats="%s"' % frame_stats_file]
        self.run_chrome_test_binary(VEA_BINARY, ' '.join(vea_args))
        self._analyze_frame_stats(test_name, frame_stats_file)


    @helper_logger.video_log_wrapper
    @chrome_binary_test.nuke_chrome
    def run_once(self, test_cases, required_cap):
        """
        Tests ChromeOS video hardware encoder performance.

        @param test_cases: List of (path, on_cloud, width, height,
                           requested_bit_rate, profile, requested_frame_rate)
                           tuples.
        @param required_cap: Device capability required to run the test.
        """
        last_error = None
        device_capability.DeviceCapability().ensure_capability(required_cap)
        for (path, on_cloud, width, height, requested_bit_rate,
             profile, requested_frame_rate) in test_cases:
            input_path = None
            output_path = None
            try:
                test_name, output_name = self._convert_test_name(
                    path, on_cloud, profile)
                if on_cloud:
                    input_path = os.path.join(self.tmpdir,
                                              os.path.basename(path))
                    self._download_video(path, input_path)
                else:
                    input_path = os.path.join(self.cr_source_dir, path)
                output_path = os.path.join(self.tmpdir, output_name)
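                # vea_unittest parses --test_stream_data as a colon-separated
                # tuple: input path, width, height, profile, output path,
                # requested bitrate, and requested frame rate.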
                test_stream_data = '%s:%s:%s:%s:%s:%s:%s' % (
                    input_path, width, height, profile, output_path,
                    requested_bit_rate, requested_frame_rate)
                self._run_test_case(test_name, test_stream_data)
            except Exception as last_error:
                # Log the error and continue to the next test case.
                logging.exception(last_error)
            finally:
                # Guard against failures before the paths were assigned.
                if on_cloud and input_path:
                    _remove_if_exists(input_path)
                if output_path:
                    _remove_if_exists(output_path)

        if last_error:
            raise last_error
345