# Lint as: python2, python3
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import logging
import numbers
import os
import tempfile
import six

import numpy

from autotest_lib.client.common_lib import error, utils
from autotest_lib.client.common_lib.cros import dev_server

TELEMETRY_RUN_BENCHMARKS_SCRIPT = 'tools/perf/run_benchmark'
TELEMETRY_RUN_TESTS_SCRIPT = 'tools/telemetry/run_tests'
TELEMETRY_RUN_GPU_TESTS_SCRIPT = 'content/test/gpu/run_gpu_integration_test.py'
TELEMETRY_TIMEOUT_MINS = 150

DUT_CHROME_ROOT = '/usr/local/telemetry/src'

CHART_JSON_RESULT = 'results-chart.json'
HISTOGRAM_SET_RESULT = 'histograms.json'
PROFILE_ARTIFACTS = 'artifacts'

# Result Statuses
SUCCESS_STATUS = 'SUCCESS'
WARNING_STATUS = 'WARNING'
FAILED_STATUS = 'FAILED'

# A list of telemetry tests that cannot run on dut.
ON_DUT_BLOCKLIST = [
        'loading.desktop',  # crbug/882299
        'rendering.desktop',  # crbug/882291
]


class TelemetryResult(object):
    """Class to represent the results of a telemetry run.

    This class represents the results of a telemetry run, whether it ran
    successfully, failed, or had warnings.
    """

    def __init__(self, exit_code=0, stdout='', stderr=''):
        """Initializes this TelemetryResult instance.

        @param exit_code: Exit code of the telemetry run; 0 means success.
        @param stdout: Stdout of the telemetry run.
        @param stderr: Stderr of the telemetry run.
        """
        if exit_code == 0:
            self.status = SUCCESS_STATUS
        else:
            self.status = FAILED_STATUS

        self._stdout = stdout
        self._stderr = stderr
        self.output = '\n'.join([stdout, stderr])


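# Illustrative usage sketch (added commentary, not part of the original
# module). Assumes a server-side autotest test with a `host` object and a
# `self.job` that provides output_perf_value() and resultsdir:
#
#     runner = TelemetryRunner(host, local=False, telemetry_on_dut=True)
#     result = runner.run_telemetry_benchmark('octane',
#                                             perf_value_writer=self.job)
#     logging.info('Benchmark status: %s', result.status)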
class TelemetryRunner(object):
    """Class responsible for running telemetry for a given build.

    This class will extract and install telemetry on the devserver and is
    responsible for executing the telemetry benchmarks and returning their
    output to the caller.
    """

    def __init__(self, host, local=False, telemetry_on_dut=True):
        """Initializes this telemetry runner instance.

        If telemetry is not installed for this build, it will be.

        Basically, one of the following commands will be executed on the local
        PC on which test_that runs, depending on the 4 possible combinations of
        local x telemetry_on_dut:

        local=True, telemetry_on_dut=False:
        python2 run_benchmark --browser=cros-chrome --remote=[dut] [test]

        local=True, telemetry_on_dut=True:
        ssh [dut] python2 run_benchmark --browser=system [test]

        local=False, telemetry_on_dut=False:
        ssh [devserver] python2 run_benchmark --browser=cros-chrome
        --remote=[dut] [test]

        local=False, telemetry_on_dut=True:
        ssh [devserver] ssh [dut] python2 run_benchmark --browser=system [test]

        @param host: Host where the test will be run.
        @param local: If set, no devserver will be used and the test will be
                      run locally.
                      If not set, "ssh [devserver] " will be prefixed to test
                      commands.
        @param telemetry_on_dut: If set, telemetry itself (the test harness)
                                 will run on the DUT.
                                 This decides browser=[system|cros-chrome].
        """
        self._host = host
        self._devserver = None
        self._telemetry_path = None
        self._perf_value_writer = None
        self._telemetry_on_dut = telemetry_on_dut
        # TODO (llozano crbug.com/324964). Remove conditional code.
        # Use a class hierarchy instead.
        if local:
            self._setup_local_telemetry()
        else:
            self._setup_devserver_telemetry()
        self._benchmark_deps = None

        logging.debug('Telemetry Path: %s', self._telemetry_path)

    def _setup_devserver_telemetry(self):
        """Set up Telemetry to use the devserver."""
        logging.debug('Setting up telemetry for devserver testing')
        logging.debug('Grabbing build from AFE.')
        info = self._host.host_info_store.get()
        if not info.build:
            logging.error('Unable to locate build label for host: %s.',
                          self._host.host_port)
            raise error.AutotestError(
                    'Failed to grab build for host %s.' % self._host.host_port)

        logging.debug('Setting up telemetry for build: %s', info.build)

        self._devserver = dev_server.ImageServer.resolve(
                info.build, hostname=self._host.hostname)
        self._devserver.stage_artifacts(info.build, ['autotest_packages'])
        self._telemetry_path = self._devserver.setup_telemetry(
                build=info.build)

    def _setup_local_telemetry(self):
        """Set up Telemetry to use a local path to its sources.

        First look for the chrome source root, either externally mounted or
        inside the chroot.  Prefer the chrome-src-internal source tree to
        chrome-src.
        """
        TELEMETRY_DIR = 'src'
        CHROME_LOCAL_SRC = '/var/cache/chromeos-cache/distfiles/target/'
        CHROME_EXTERNAL_SRC = os.path.expanduser('~/chrome_root/')

        logging.debug('Setting up telemetry for local testing')

        sources_list = ('chrome-src-internal', 'chrome-src')
        dir_list = [CHROME_EXTERNAL_SRC]
        dir_list.extend(
                [os.path.join(CHROME_LOCAL_SRC, x) for x in sources_list])
        if 'CHROME_ROOT' in os.environ:
            dir_list.insert(0, os.environ['CHROME_ROOT'])

        telemetry_src = ''
        for dir in dir_list:
            if os.path.exists(dir):
                telemetry_src = os.path.join(dir, TELEMETRY_DIR)
                break
        else:
            raise error.TestError('Telemetry source directory not found.')

        self._devserver = None
        self._telemetry_path = telemetry_src

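    # Note (added, best-effort illustration): exporting CHROME_ROOT before the
    # test runs, e.g.
    #     export CHROME_ROOT=~/chromium/src
    # makes that checkout the first candidate searched above, ahead of
    # ~/chrome_root/ and the chroot's distfiles copies.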
    def _get_telemetry_cmd(self, script, test_or_benchmark, output_format,
                           *args, **kwargs):
        """Build command to execute telemetry based on script and benchmark.

        @param script: Telemetry script we want to run. For example:
                       [path_to_telemetry_src]/src/tools/telemetry/run_tests.
        @param test_or_benchmark: Name of the test or benchmark we want to run,
                                  with the page_set (if required) as part of
                                  the string.
        @param output_format: Format of the json result file: histogram or
                              chart-json.
        @param args: additional list of arguments to pass to the script.
        @param kwargs: additional list of keyword arguments to pass to the
                       script.

        @returns Full telemetry command to execute the script.
        """
        telemetry_cmd = []
        if self._devserver:
            devserver_hostname = self._devserver.hostname
            telemetry_cmd.extend(['ssh', devserver_hostname])

        no_verbose = kwargs.get('no_verbose', False)

        output_dir = (DUT_CHROME_ROOT
                      if self._telemetry_on_dut else self._telemetry_path)
        # Create a temp directory to hold a single test run.
        if self._perf_value_writer:
            output_dir = os.path.join(
                    output_dir, self._perf_value_writer.tmpdir.strip('/'))

        if self._telemetry_on_dut:
            telemetry_cmd.extend([
                    self._host.ssh_command(
                            alive_interval=900, connection_attempts=4),
                    'python2',
                    script,
                    '--output-format=%s' % output_format,
                    '--output-dir=%s' % output_dir,
                    '--browser=system',
            ])
        else:
            telemetry_cmd.extend([
                    'python2',
                    script,
                    '--browser=cros-chrome',
                    '--output-format=%s' % output_format,
                    '--output-dir=%s' % output_dir,
                    '--remote=%s' % self._host.hostname,
            ])
            if self._host.host_port != self._host.hostname:
                # If the user specifies a different port for the DUT, we should
                # use a different telemetry argument to set it up.
                #
                # e.g. when the user runs experiments with ssh port
                # forwarding and specifies the remote as 127.0.0.1:2222,
                # host_port is 127.0.0.1:2222, hostname is 127.0.0.1 and
                # port is 2222.
                telemetry_cmd.append('--remote-ssh-port=%s' % self._host.port)

        if not no_verbose:
            telemetry_cmd.append('--verbose')
        telemetry_cmd.extend(args)
        telemetry_cmd.append(test_or_benchmark)

        return ' '.join(telemetry_cmd)

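    # Rough shape of the assembled command (added illustration; placeholder
    # values in angle brackets are hypothetical). With a devserver and
    # telemetry_on_dut=False it looks approximately like:
    #   ssh <devserver> python2 <telemetry_path>/tools/perf/run_benchmark \
    #       --browser=cros-chrome --output-format=histograms \
    #       --output-dir=<output_dir> --remote=<dut hostname> --verbose \
    #       <benchmark>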
    def _scp_telemetry_results_cmd(self, perf_results_dir, output_format,
                                   artifacts):
        """Build command to copy the telemetry results from the devserver.

        @param perf_results_dir: directory path where test output is to be
                                 collected.
        @param output_format: Format of the json result file: histogram or
                              chart-json.
        @param artifacts: Whether we want to copy artifacts directory.

        @returns SCP command to copy the results json to the specified
                 directory.
        """
        if not perf_results_dir:
            return ''

        output_filename = CHART_JSON_RESULT
        if output_format == 'histograms':
            output_filename = HISTOGRAM_SET_RESULT
        scp_cmd = []
        if self._telemetry_on_dut:
            scp_cmd.extend(['scp', '-r'])
            scp_cmd.append(
                    self._host.make_ssh_options(
                            alive_interval=900, connection_attempts=4))
            if not self._host.is_default_port:
                scp_cmd.append('-P %d' % self._host.port)
            src = 'root@%s:%s' % (self._host.hostname, DUT_CHROME_ROOT)
        else:
            # Use rsync --remove-source-files to move rather than copy from
            # the server, because each run generates artifacts that are never
            # removed afterwards, so the result size would keep growing.
            # We don't do this for results on the DUT because 1) rsync doesn't
            # work there and 2) the DUT is reflashed frequently, so result
            # size is not a concern.
            scp_cmd.extend(['rsync', '-avz', '--remove-source-files'])
            devserver_hostname = ''
            if self._devserver:
                devserver_hostname = self._devserver.hostname + ':'
            src = '%s%s' % (devserver_hostname, self._telemetry_path)

        if self._perf_value_writer:
            src = os.path.join(src, self._perf_value_writer.tmpdir.strip('/'))

        scp_cmd.append(os.path.join(src, output_filename))

        # Copy artifacts back to result directory if needed.
        if artifacts:
            scp_cmd.append(os.path.join(src, PROFILE_ARTIFACTS))

        scp_cmd.append(perf_results_dir)
        return ' '.join(scp_cmd)

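    # Rough shape of the copy command (added illustration; placeholders are
    # hypothetical). With telemetry_on_dut=True it is roughly:
    #   scp -r <ssh options> root@<dut>:/usr/local/telemetry/src/results-chart.json \
    #       <perf_results_dir>
    # otherwise it becomes an rsync --remove-source-files from the devserver
    # (or the local telemetry path) into <perf_results_dir>.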
    def _run_cmd(self, cmd):
        """Execute a command in an external shell and capture the output.

        @param cmd: String of a valid shell command.

        @returns The standard out, standard error and the integer exit code of
                 the executed command.
        """
        logging.debug('Running: %s', cmd)

        output = six.StringIO()
        error_output = six.StringIO()
        exit_code = 0
        try:
            result = utils.run(
                    cmd,
                    stdout_tee=output,
                    stderr_tee=error_output,
                    timeout=TELEMETRY_TIMEOUT_MINS * 60)
            exit_code = result.exit_status
        except error.CmdError as e:
            logging.debug('Error occurred executing command.')
            exit_code = e.result_obj.exit_status

        stdout = output.getvalue()
        stderr = error_output.getvalue()
        logging.debug('Completed with exit code: %d.\nstdout:%s\n'
                      'stderr:%s', exit_code, stdout, stderr)
        return stdout, stderr, exit_code

    def _run_telemetry(self, script, test_or_benchmark, output_format, *args,
                       **kwargs):
        """Runs telemetry on a dut.

        @param script: Telemetry script we want to run. For example:
                       [path_to_telemetry_src]/src/tools/telemetry/run_tests.
        @param test_or_benchmark: Name of the test or benchmark we want to run,
                                 with the page_set (if required) as part of the
                                 string.
        @param args: additional list of arguments to pass to the script.
        @param kwargs: additional list of keyword arguments to pass to the
                       script.

        @returns A TelemetryResult Instance with the results of this telemetry
                 execution.
        """
        # TODO (sbasi crbug.com/239933) add support for incognito mode.

        telemetry_cmd = self._get_telemetry_cmd(script, test_or_benchmark,
                                                output_format, *args, **kwargs)
        logging.info('Running Telemetry: %s', telemetry_cmd)

        stdout, stderr, exit_code = self._run_cmd(telemetry_cmd)

        return TelemetryResult(
                exit_code=exit_code, stdout=stdout, stderr=stderr)

    def _run_scp(self, perf_results_dir, output_format, artifacts=False):
        """Copies telemetry results into the given local results directory.

        @param perf_results_dir: The local directory where results are
                                 collected.
        @param output_format: Format of the json result file.
        @param artifacts: Whether we want to copy artifacts directory.
        """
        scp_cmd = self._scp_telemetry_results_cmd(perf_results_dir,
                                                  output_format, artifacts)
        logging.debug('Retrieving Results: %s', scp_cmd)
        _, _, exit_code = self._run_cmd(scp_cmd)
        if exit_code != 0:
            raise error.TestFail('Unable to retrieve results.')

        if output_format == 'histograms':
            # Converts to chart json format.
            input_filename = os.path.join(perf_results_dir,
                                          HISTOGRAM_SET_RESULT)
            output_filename = os.path.join(perf_results_dir, CHART_JSON_RESULT)
            with open(input_filename) as fin:
                histograms = json.load(fin)
            chartjson = TelemetryRunner.convert_chart_json(histograms)
            with open(output_filename, 'w') as fout:
                fout.write(json.dumps(chartjson, indent=2))

    def _run_test(self, script, test, *args):
        """Runs a telemetry test on a dut.

        @param script: Which telemetry test script we want to run. Can be
                       telemetry's base test script or the Chrome OS specific
                       test script.
        @param test: Telemetry test we want to run.
        @param args: additional list of arguments to pass to the script.

        @returns A TelemetryResult Instance with the results of this telemetry
                 execution.
        """
        logging.debug('Running telemetry test: %s', test)
        telemetry_script = os.path.join(self._telemetry_path, script)
        result = self._run_telemetry(telemetry_script, test, 'chartjson',
                                     *args)
        if result.status is FAILED_STATUS:
            raise error.TestFail('Telemetry test %s failed.' % test)
        return result

    def run_telemetry_test(self, test, *args):
        """Runs a telemetry test on a dut.

        @param test: Telemetry test we want to run.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.

        @returns A TelemetryResult Instance with the results of this telemetry
                 execution.
        """
        return self._run_test(TELEMETRY_RUN_TESTS_SCRIPT, test, *args)

    def run_telemetry_benchmark(self,
                                benchmark,
                                perf_value_writer=None,
                                *args,
                                **kwargs):
        """Runs a telemetry benchmark on a dut.

        @param benchmark: Benchmark we want to run.
        @param perf_value_writer: Should be an instance with the function
                                  output_perf_value(), if None, no perf value
                                  will be written. Typically this will be the
                                  job object from an autotest test.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.
        @param kwargs: additional list of keyword arguments to pass to the
                       telemetry execution script.

        @returns A TelemetryResult Instance with the results of this telemetry
                 execution.
        """
        logging.debug('Running telemetry benchmark: %s', benchmark)

        self._perf_value_writer = perf_value_writer

        if benchmark in ON_DUT_BLOCKLIST:
            self._telemetry_on_dut = False

        output_format = kwargs.get('ex_output_format', '')

        if not output_format:
            output_format = 'histograms'

        if self._telemetry_on_dut:
            telemetry_script = os.path.join(DUT_CHROME_ROOT,
                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)
            self._ensure_deps(self._host, benchmark)
        else:
            telemetry_script = os.path.join(self._telemetry_path,
                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)

        result = self._run_telemetry(telemetry_script, benchmark,
                                     output_format, *args, **kwargs)

        if result.status is WARNING_STATUS:
            raise error.TestWarn('Telemetry Benchmark: %s'
                                 ' exited with Warnings.\nOutput:\n%s\n' %
                                 (benchmark, result.output))
        elif result.status is FAILED_STATUS:
            raise error.TestFail('Telemetry Benchmark: %s'
                                 ' failed to run.\nOutput:\n%s\n' %
                                 (benchmark, result.output))
        elif '[  PASSED  ] 0 tests.' in result.output:
            raise error.TestWarn('Telemetry Benchmark: %s exited successfully,'
                                 ' but no test actually passed.\nOutput\n%s\n'
                                 % (benchmark, result.output))
        if perf_value_writer:
            artifacts = kwargs.get('artifacts', False)
            self._run_scp(perf_value_writer.resultsdir, output_format,
                          artifacts)
        return result

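    # Illustrative call (added commentary; 'octane' and self.job are example
    # values). The keyword arguments recognized above are 'ex_output_format',
    # 'artifacts' and, via _get_telemetry_cmd(), 'no_verbose':
    #
    #     runner.run_telemetry_benchmark('octane',
    #                                    perf_value_writer=self.job,
    #                                    ex_output_format='histograms',
    #                                    artifacts=True)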
    def run_gpu_integration_test(self, test, *args):
        """Runs a gpu test on a dut.

        @param test: Gpu test we want to run.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.

        @returns A TelemetryResult instance with the results of this telemetry
                 execution.
        """
        script = os.path.join(DUT_CHROME_ROOT, TELEMETRY_RUN_GPU_TESTS_SCRIPT)
        cmd = []
        if self._devserver:
            devserver_hostname = self._devserver.hostname
            cmd.extend(['ssh', devserver_hostname])

        cmd.extend([
                self._host.ssh_command(
                        alive_interval=900, connection_attempts=4), 'python2',
                script
        ])
        cmd.extend(args)
        cmd.append(test)
        cmd = ' '.join(cmd)
        stdout, stderr, exit_code = self._run_cmd(cmd)

        if exit_code:
            raise error.TestFail('Gpu Integration Test: %s'
                                 ' failed to run.' % test)

        return TelemetryResult(
                exit_code=exit_code, stdout=stdout, stderr=stderr)

    def _ensure_deps(self, dut, test_name):
        """
        Ensure the dependencies are locally available on DUT.

        @param dut: The autotest host object representing DUT.
        @param test_name: Name of the telemetry test.
        """
        # Get DEPs using host's telemetry.
        # Example output, fetch_benchmark_deps.py --output-deps=deps octane:
        # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
        fetch_path = os.path.join(self._telemetry_path, 'tools', 'perf',
                                  'fetch_benchmark_deps.py')
        # Use a temporary file for |deps_path| to avoid race conditions. The
        # created temporary file is assigned to |self._benchmark_deps| to make
        # it valid until |self| is destroyed.
        self._benchmark_deps = tempfile.NamedTemporaryFile(
                prefix='fetch_benchmark_deps_result.', suffix='.json')
        deps_path = self._benchmark_deps.name
        format_fetch = ('python2 %s --output-deps=%s %s')
        command_fetch = format_fetch % (fetch_path, deps_path, test_name)
        command_get = 'cat %s' % deps_path

        if self._devserver:
            devserver_hostname = self._devserver.url().split(
                    'http://')[1].split(':')[0]
            command_fetch = 'ssh %s %s' % (devserver_hostname, command_fetch)
            command_get = 'ssh %s %s' % (devserver_hostname, command_get)

        logging.info('Getting DEPs: %s', command_fetch)
        _, _, exit_code = self._run_cmd(command_fetch)
        if exit_code != 0:
            raise error.TestFail('Error occurred while fetching DEPs.')
        stdout, _, exit_code = self._run_cmd(command_get)
        if exit_code != 0:
            raise error.TestFail('Error occurred while getting DEPs.')

        # Download DEPs to DUT.
        # send_file() relies on rsync over ssh. Couldn't be better.
        deps = json.loads(stdout)
        for dep in deps[test_name]:
            src = os.path.join(self._telemetry_path, dep)
            dst = os.path.join(DUT_CHROME_ROOT, dep)
            if self._devserver:
                logging.info('Copying: %s -> %s', src, dst)
                rsync_cmd = utils.sh_escape(
                        'rsync %s %s %s:%s' % (self._host.rsync_options(), src,
                                               self._host.hostname, dst))
                utils.run('ssh %s "%s"' % (devserver_hostname, rsync_cmd))
            else:
                if not os.path.isfile(src):
                    raise error.TestFail('Error occurred while saving DEPs.')
                logging.info('Copying: %s -> %s', src, dst)
                dut.send_file(src, dst)

    @staticmethod
    def convert_chart_json(histogram_set):
        """
        Convert from histogram set to chart json format.

        @param histogram_set: result in histogram set format.

        @returns result in chart json format.
        """
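        # Illustrative sketch of the conversion (added commentary with
        # hypothetical values). An input histogram set such as
        #   [{'guid': 'g1', 'type': 'GenericSet', 'values': ['octane']},
        #    {'name': 'Total.Score', 'unit': 'unitless_biggerIsBetter',
        #     'sampleValues': [100, 120], 'diagnostics': {'benchmarks': 'g1'}}]
        # maps to chart json roughly like
        #   {'benchmark_name': 'octane',
        #    'charts': {'Total.Score': {'default': {'values': [100, 120],
        #                                           'units': 'score', ...},
        #                               'summary': {...}}},
        #    'format_version': 1.0, ...}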
        value_map = {}

        # Gets generic set values.
        for obj in histogram_set:
            if 'type' in obj and obj['type'] == 'GenericSet':
                value_map[obj['guid']] = obj['values']

        charts = {}
        benchmark_name = ''
        benchmark_desc = ''

        # Checks the unit test for how this conversion works.
        for obj in histogram_set:
            if 'name' not in obj or 'sampleValues' not in obj:
                continue
            metric_name = obj['name']
            diagnostics = obj['diagnostics']
            if 'stories' in diagnostics:
                story_name = value_map[diagnostics['stories']][0]
            else:
                story_name = 'default'
            local_benchmark_name = value_map[diagnostics['benchmarks']][0]
            if benchmark_name == '':
                benchmark_name = local_benchmark_name
                if 'benchmarkDescriptions' in diagnostics:
                    benchmark_desc = value_map[
                            diagnostics['benchmarkDescriptions']][0]
            if benchmark_name != local_benchmark_name:
                logging.warning(
                        'There is more than one benchmark name in the '
                        'result. old: %s, new: %s', benchmark_name,
                        local_benchmark_name)
                continue

            unit = obj['unit']
            smaller_postfixes = ('_smallerIsBetter', '-')
            bigger_postfixes = ('_biggerIsBetter', '+')
            all_postfixes = smaller_postfixes + bigger_postfixes

            improvement = 'up'
            for postfix in smaller_postfixes:
                if unit.endswith(postfix):
                    improvement = 'down'
            for postfix in all_postfixes:
                if unit.endswith(postfix):
                    unit = unit[:-len(postfix)]
                    break

            if unit == 'unitless':
                unit = 'score'

            values = [
                    x for x in obj['sampleValues']
                    if isinstance(x, numbers.Number)
            ]
            if metric_name not in charts:
                charts[metric_name] = {}
            charts[metric_name][story_name] = {
                    'improvement_direction': improvement,
                    'name': metric_name,
                    'std': numpy.std(values),
                    'type': 'list_of_scalar_values',
                    'units': unit,
                    'values': values
            }

        # Adds summaries.
        for metric_name in charts:
            values = []
            metric_content = charts[metric_name]
            for story_name in metric_content:
                story_content = metric_content[story_name]
                values += story_content['values']
                metric_type = story_content['type']
                units = story_content['units']
                improvement = story_content['improvement_direction']
            values.sort()
            std = numpy.std(values)
            metric_content['summary'] = {
                    'improvement_direction': improvement,
                    'name': metric_name,
                    'std': std,
                    'type': metric_type,
                    'units': units,
                    'values': values
            }

        benchmark_metadata = {
                'description': benchmark_desc,
                'name': benchmark_name,
                'type': 'telemetry_benchmark'
        }
        return {
                'benchmark_description': benchmark_desc,
                'benchmark_metadata': benchmark_metadata,
                'benchmark_name': benchmark_name,
                'charts': charts,
                'format_version': 1.0
        }
