# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import shutil
import StringIO

import common
from autotest_lib.client.common_lib import error
from autotest_lib.server import test
from autotest_lib.server import utils
from autotest_lib.site_utils import test_runner_utils


TELEMETRY_TIMEOUT_MINS = 60
WAIT_FOR_CMD_TIMEOUT_SECS = 60
DUT_COMMON_SSH_OPTIONS = ['-o StrictHostKeyChecking=no',
                          '-o UserKnownHostsFile=/dev/null',
                          '-o BatchMode=yes',
                          '-o ConnectTimeout=30',
                          '-o ServerAliveInterval=900',
                          '-o ServerAliveCountMax=3',
                          '-o ConnectionAttempts=4',
                          '-o Protocol=2']
DUT_SCP_OPTIONS = ' '.join(DUT_COMMON_SSH_OPTIONS)

CHROME_SRC_ROOT = '/var/cache/chromeos-cache/distfiles/target/'
CLIENT_CHROME_ROOT = '/usr/local/telemetry/src'
RUN_BENCHMARK = 'tools/perf/run_benchmark'

RSA_KEY = '-i %s' % test_runner_utils.TEST_KEY_PATH
DUT_CHROME_RESULTS_DIR = '/usr/local/telemetry/src/tools/perf'

# Result statuses.
SUCCESS_STATUS = 'SUCCESS'
WARNING_STATUS = 'WARNING'
FAILED_STATUS = 'FAILED'

# Regex for the RESULT output lines understood by chrome buildbot.
# Keep in sync with
# chromium/tools/build/scripts/slave/performance_log_processor.py.
RESULTS_REGEX = re.compile(r'(?P<IMPORTANT>\*)?RESULT '
                           r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
                           r'(?P<VALUE>[\{\[]?[-\d\., ]+[\}\]]?)('
                           r' ?(?P<UNITS>.+))?')
HISTOGRAM_REGEX = re.compile(r'(?P<IMPORTANT>\*)?HISTOGRAM '
                             r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
                             r'(?P<VALUE_JSON>{.*})(?P<UNITS>.+)?')
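# For reference, RESULTS_REGEX matches output lines of the form
# (illustrative sample, not taken from a real run):
#   *RESULT warm_times: page_load_time= 1024 ms
# with the leading '*' marking an important result and the named groups
# capturing the graph name, trace name, value and units. HISTOGRAM_REGEX
# matches the analogous HISTOGRAM lines whose value is a JSON object.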

_RUN_BACKGROUND_TEMPLATE = '(%(cmd)s) </dev/null >/dev/null 2>&1 & echo -n $!'
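# For example, with cmd='sleep 10' the template expands to
#   (sleep 10) </dev/null >/dev/null 2>&1 & echo -n $!
# which launches the command detached from stdin/stdout and prints only the
# pid of the background job.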

_WAIT_CMD_TEMPLATE = """\
to=%(timeout)d; \
while test ${to} -ne 0; do \
  ps %(pid)d >/dev/null || break; \
  sleep 1; \
  to=$((to - 1)); \
done; \
! ps %(pid)d >/dev/null \
"""
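# For example, with pid=1234 and timeout=60 this expands to a one-line shell
# loop that polls `ps 1234` once per second for up to 60 seconds and then
# succeeds only once the process is gone. A negative timeout never counts
# down to zero, so it waits forever.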


def _run_in_background(host, cmd, stdout, stderr, timeout):
    """Launch command on host; return without waiting for it to finish.

    @param host: A host object representing where the command runs.
    @param cmd: The command to run.
    @param stdout: Stream to tee the launch command's stdout to.
    @param stderr: Stream to tee the launch command's stderr to.
    @param timeout: Number of seconds to wait for the launch itself.

    @return The result of launching this command, which contains pid info.
    """
    background_cmd = _RUN_BACKGROUND_TEMPLATE % {'cmd': cmd}
    logging.info('BACKGROUND CMD: %s', background_cmd)
    return host.run(background_cmd,
                    stdout_tee=stdout,
                    stderr_tee=stderr,
                    timeout=timeout)


def _wait_for_process(host, pid, timeout=-1):
    """Waits for a process to terminate.

    @param host: A host object (or the server-side utils module)
            representing where the process runs.
    @param pid: The process ID (integer).
    @param timeout: Number of seconds to wait; the default (-1) waits
            forever.
    """
    wait_cmd = _WAIT_CMD_TEMPLATE % {'pid': pid, 'timeout': timeout}
    host.run(wait_cmd, ignore_status=True)


def _kill_perf(host):
    """Kills any running perf process on the DUT.

    @param host: A host object representing the DUT.
    """
    # Use the numeric signal: -2 is equivalent to -INT, but killall on a
    # ChromeOS release image does not recognize the -INT spelling.
    kill_cmd = 'killall -2 perf'
    logging.info('Killing perf using: %s', kill_cmd)
    host.run(kill_cmd, ignore_status=True)


def _find_chrome_root_dir():
    # Look for the chrome source root, either externally mounted or inside
    # the chroot.  Prefer the chrome-src-internal source tree to chrome-src.
    sources_list = ('chrome-src-internal', 'chrome-src')

    dir_list = [os.path.join(CHROME_SRC_ROOT, x) for x in sources_list]
    if 'CHROME_ROOT' in os.environ:
        dir_list.insert(0, os.environ['CHROME_ROOT'])

    for directory in dir_list:
        if os.path.exists(directory):
            chrome_root_dir = directory
            break
    else:
        raise error.TestError('Chrome source directory not found.')

    logging.info('Using Chrome source tree at %s', chrome_root_dir)
    return os.path.join(chrome_root_dir, 'src')
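# For example, with CHROME_ROOT unset and only chrome-src present, the
# function above returns
# '/var/cache/chromeos-cache/distfiles/target/chrome-src/src'.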


def _ensure_deps(dut, test_name):
    """
    Ensure the dependencies are locally available on the DUT.

    @param dut: The autotest host object representing the DUT.
    @param test_name: Name of the telemetry test.
    """
    # Get the DEPs using the host's telemetry checkout.
    chrome_root_dir = _find_chrome_root_dir()
    format_string = 'python %s/tools/perf/fetch_benchmark_deps.py %s'
    command = format_string % (chrome_root_dir, test_name)
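    # e.g. 'python <chrome_root>/src/tools/perf/fetch_benchmark_deps.py
    # octane' (the benchmark name here is purely illustrative).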
    logging.info('Getting DEPs: %s', command)
    stdout = StringIO.StringIO()
    stderr = StringIO.StringIO()
    try:
        utils.run(command, stdout_tee=stdout, stderr_tee=stderr)
    except error.CmdError as e:
        logging.debug('Error occurred getting DEPs: %s\n %s\n',
                      stdout.getvalue(), stderr.getvalue())
        raise error.TestFail('Error occurred while getting DEPs: %s' % e)

    # Download the DEPs to the DUT. send_file() relies on rsync over ssh.
    stdout_str = stdout.getvalue()
    stdout.close()
    stderr.close()
    for dep in stdout_str.split():
        src = os.path.join(chrome_root_dir, dep)
        dst = os.path.join(CLIENT_CHROME_ROOT, dep)
        if not os.path.isfile(src):
            raise error.TestFail('Error occurred while saving DEPs.')
        logging.info('Copying: %s -> %s', src, dst)
        try:
            dut.send_file(src, dst)
        except Exception:
            raise error.TestFail('Error occurred while sending DEPs to dut.')


class telemetry_Crosperf(test.test):
    """Run one or more telemetry benchmarks under the crosperf script."""
    version = 1

    def scp_telemetry_results(self, client_ip, dut, filename, host_dir):
        """Copy telemetry results from the DUT.

        @param client_ip: The IP address of the DUT.
        @param dut: The autotest host object representing the DUT.
        @param filename: The file to copy from the DUT.
        @param host_dir: The directory on the host to put the file in.

        @returns The exit status of the scp command.
        """
        cmd = []
        src = ('root@%s:%s/%s' %
               (dut.hostname if dut else client_ip,
                DUT_CHROME_RESULTS_DIR,
                filename))
        cmd.extend(['scp', DUT_SCP_OPTIONS, RSA_KEY, '-v',
                    src, host_dir])
        command = ' '.join(cmd)

        logging.debug('Retrieving results: %s', command)
        try:
            result = utils.run(command,
                               timeout=WAIT_FOR_CMD_TIMEOUT_SECS)
            exit_code = result.exit_status
        except Exception as e:
            logging.error('Failed to retrieve results: %s', e)
            raise

        logging.debug('command return value: %d', exit_code)
        return exit_code

    def run_once(self, args, client_ip='', dut=None):
        """
        Run a single telemetry test.

        @param args: A dictionary of the arguments that were passed
                to this test.
        @param client_ip: The IP address of the DUT.
        @param dut: The autotest host object representing the DUT.

        @returns The exit status of the scp that fetches results-chart.json
                when run_local is true; otherwise, the result object from
                launching the telemetry command.
        """
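        # A typical args dictionary looks like the following (the benchmark
        # name is purely illustrative):
        #   {'test': 'octane', 'test_args': '', 'profiler_args': '',
        #    'run_local': 'false'}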
        test_name = args.get('test', '')
        test_args = args.get('test_args', '')
        profiler_args = args.get('profiler_args', '')

        # Decide whether the test runs locally on the DUT or is driven
        # from the server.
        if args.get('run_local', 'false').lower() == 'true':
            # The telemetry scripts will run on the DUT.
            _ensure_deps(dut, test_name)
            format_string = ('python %s --browser=system '
                             '--output-format=chartjson %s %s')
            command = format_string % (os.path.join(CLIENT_CHROME_ROOT,
                                                    RUN_BENCHMARK),
                                       test_args, test_name)
            runner = dut
        else:
            # The telemetry scripts will run on the server.
            format_string = ('python %s --browser=cros-chrome --remote=%s '
                             '--output-dir="%s" '
                             '--output-format=chartjson %s %s')
            command = format_string % (os.path.join(_find_chrome_root_dir(),
                                                    RUN_BENCHMARK),
                                       client_ip, self.resultsdir, test_args,
                                       test_name)
            runner = utils
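            # In this branch the final command looks roughly like the
            # following (paths and benchmark name are illustrative):
            #   python <chrome_root>/src/tools/perf/run_benchmark
            #     --browser=cros-chrome --remote=<DUT ip>
            #     --output-dir="<resultsdir>" --output-format=chartjson
            #     <test_args> octane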
        # Run the test, collecting a profile if requested.
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            # If profiler_args is specified, add several more options to the
            # command so that run_benchmark collects system-wide profiles.
            if profiler_args:
                command += ' --interval-profiling-period=story_run' \
                           ' --interval-profiling-target=system_wide' \
                           ' --interval-profiler-options="%s"' \
                           % (profiler_args)

            logging.info('BENCHMARK CMD: %s', command)
            # Run the benchmark in the background and capture its pid.
            result = _run_in_background(runner, command, stdout, stderr,
                                        WAIT_FOR_CMD_TIMEOUT_SECS)
            benchmark_pid = int(result.stdout.rstrip())

            # Wait until the benchmark run finishes.
            _wait_for_process(runner, benchmark_pid,
                              TELEMETRY_TIMEOUT_MINS * 60)

            # If no command error happened, set exit_code to 0.
            exit_code = 0

        except error.CmdError as e:
            logging.debug('Error occurred executing telemetry.')
            exit_code = e.result_obj.exit_status
            raise error.TestFail('An error occurred while executing the '
                                 'telemetry test.')
        except:
            logging.debug('Telemetry aborted with an unknown error.')
            exit_code = -1
            raise
        finally:
            # Make sure perf on the DUT is gone in case anything unexpected
            # happened above; we don't want a stray perf process to keep
            # running on the DUT and fill up the disk after we finish.
            try:
                _kill_perf(dut)
            except Exception:
                pass
            stdout_str = stdout.getvalue()
            stderr_str = stderr.getvalue()
            stdout.close()
            stderr.close()
            logging.info('Telemetry completed with exit code: %d.'
                         '\nstdout:%s\nstderr:%s', exit_code,
                         stdout_str, stderr_str)

        # Copy the results-chart.json file into the test_that results
        # directory, if necessary.
        if args.get('run_local', 'false').lower() == 'true':
            result = self.scp_telemetry_results(client_ip, dut,
                                                'results-chart.json',
                                                self.resultsdir)
        else:
            filepath = os.path.join(self.resultsdir, 'results-chart.json')
            if not os.path.exists(filepath):
                exit_code = -1
                raise error.TestFail('Missing results file: %s' % filepath)

        # Copy the perf data file into the test_that profiling directory,
        # if necessary. The perf data always comes from the DUT.
        if profiler_args:
            artifacts_dir = os.path.join(self.resultsdir, 'artifacts')
            perf_exists = False
            for filename in os.listdir(artifacts_dir):
                if filename.endswith('perf.data'):
                    perf_exists = True
                    shutil.copyfile(os.path.join(artifacts_dir, filename),
                                    os.path.join(self.profdir, 'perf.data'))
            if not perf_exists:
                exit_code = -1
                raise error.TestFail('Error: No profiles collected; the test '
                                     'may not have run correctly.')

        return result