• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2013 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Parses the command line, discovers the appropriate benchmarks, and runs them.
6
7Handles benchmark configuration, but all the logic for
8actually running the benchmark is in Benchmark and PageRunner."""
9
from __future__ import print_function

import argparse
import hashlib
import json
import logging
import os
import sys

from telemetry import benchmark
from telemetry import decorators
from telemetry.core import discover
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options
from telemetry.internal.util import binary_manager
from telemetry.internal.util import command_line
from telemetry.internal.util import ps_util
from telemetry.util import matching
26
27
# Right now, we only have one of each of our power perf bots. This means that
# all eligible Telemetry benchmarks are run unsharded, which results in very
# long (12h) cycle times. We'd like to reduce the number of tests that we run
# on each bot drastically until we get more of the same hardware to shard tests
# with, but we can't do so until we've verified that the hardware configuration
# is a viable one for Chrome Telemetry tests. This is done by seeing at least
# one all-green test run. As this happens for each bot, we'll add it to this
# whitelist, making it eligible to run only BattOr power tests.
# Compared against os.environ['BUILDBOT_BUILDERNAME'] in
# _GetJsonBenchmarkList below.
GOOD_POWER_PERF_BOT_WHITELIST = [
  "Mac Power Dual-GPU Perf",
  "Mac Power Low-End Perf"
]


# Format string applied to every log record emitted via logging.basicConfig
# in main(); the log *level* itself is configured by browser_options.
DEFAULT_LOG_FORMAT = (
  '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d  '
  '%(message)s')
45
46
def _IsBenchmarkEnabled(benchmark_class, possible_browser):
  """Returns True if |benchmark_class| is a Benchmark enabled on the browser.

  A benchmark is enabled when it is a benchmark.Benchmark subclass, is not
  disabled for |possible_browser| by its ShouldDisable hook, and is enabled
  per its decorators.
  """
  if not issubclass(benchmark_class, benchmark.Benchmark):
    return False
  if benchmark_class.ShouldDisable(possible_browser):
    return False
  return decorators.IsEnabled(benchmark_class, possible_browser)[0]
51
52
def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
  """Prints benchmarks that are not filtered in the same order of benchmarks in
  the |benchmarks| list.

  Args:
    benchmarks: the list of benchmarks to be printed (in the same order of the
      list).
    possible_browser: the possible_browser instance that's used for checking
      which benchmarks are enabled.
    output_pipe: the stream in which benchmarks are printed on.
  """
  if not benchmarks:
    print('No benchmarks found!', file=output_pipe)
    return

  # Collect the offending classes so the assertion message names them; the
  # old generator-expression version always reported None here.
  bad_benchmarks = [b for b in benchmarks
                    if not issubclass(b, benchmark.Benchmark)]
  assert not bad_benchmarks, (
      '|benchmarks| param contains non benchmark class: %s' % bad_benchmarks)

  # Align the benchmark names to the longest one.
  format_string = '  %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
  disabled_benchmarks = []

  print('Available benchmarks %sare:' % (
      'for %s ' % possible_browser.browser_type if possible_browser else ''),
      file=output_pipe)

  # Sort the benchmarks by benchmark name.
  for b in sorted(benchmarks, key=lambda b: b.Name()):
    # Without a browser we cannot tell enabled from disabled; list everything.
    if not possible_browser or _IsBenchmarkEnabled(b, possible_browser):
      print(format_string % (b.Name(), b.Description()), file=output_pipe)
    else:
      disabled_benchmarks.append(b)

  if disabled_benchmarks:
    print('\nDisabled benchmarks for %s are (force run with -d):' %
          possible_browser.browser_type, file=output_pipe)
    for b in disabled_benchmarks:
      print(format_string % (b.Name(), b.Description()), file=output_pipe)
  print('Pass --browser to list benchmarks for another browser.\n',
        file=output_pipe)
94
95
class Help(command_line.OptparseCommand):
  """Display help information about a command"""

  usage = '[command]'

  def __init__(self, commands):
    # All registered command classes; used for both dispatch and the summary.
    self._all_commands = commands

  def Run(self, args):
    """Prints help for one matching command, or a summary of all commands.

    Returns:
      0 always; printing help is never an error.
    """
    if len(args.positional_args) == 1:
      commands = _MatchingCommands(args.positional_args[0], self._all_commands)
      if len(commands) == 1:
        command = commands[0]
        parser = command.CreateParser()
        command.AddCommandLineArgs(parser, None)
        parser.print_help()
        return 0

    # No (or ambiguous) command given: print the overall usage summary.
    print('usage: %s [command] [<options>]' % _ScriptName(), file=sys.stderr)
    print('Available commands are:', file=sys.stderr)
    for command in self._all_commands:
      print('  %-10s %s' % (command.Name(), command.Description()),
            file=sys.stderr)
    print('"%s help <command>" to see usage information '
          'for a specific command.' % _ScriptName(), file=sys.stderr)
    return 0
122
123
class List(command_line.OptparseCommand):
  """Lists the available benchmarks"""

  usage = '[benchmark_name] [<options>]'

  @classmethod
  def CreateParser(cls):
    """Builds an option parser backed by BrowserFinderOptions."""
    finder_options = browser_options.BrowserFinderOptions()
    return finder_options.CreateParser(
        '%%prog %s %s' % (cls.Name(), cls.usage))

  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    parser.add_option('-j', '--json-output-file', type='string')
    parser.add_option('-n', '--num-shards', type='int', default=1)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    """Resolves which benchmarks to list from the positional arguments."""
    num_args = len(args.positional_args)
    # No name lists everything; exactly one name does a fuzzy match.
    if num_args == 0:
      args.benchmarks = _Benchmarks(environment)
    elif num_args == 1:
      args.benchmarks = _MatchBenchmarkName(
          args.positional_args[0], environment, exact_matches=False)
    else:
      parser.error('Must provide at most one benchmark name.')

  def Run(self, args):
    """Lists benchmarks, as JSON for bots or human-readable text."""
    possible_browser = browser_finder.FindBrowser(args)
    # These browser types also ship a matching 'reference' build; look it
    # up so the JSON list can include the .reference steps.
    if args.browser_type in ('release', 'release_x64', 'debug', 'debug_x64',
                             'canary', 'android-chromium', 'android-chrome'):
      args.browser_type = 'reference'
      possible_reference_browser = browser_finder.FindBrowser(args)
    else:
      possible_reference_browser = None
    if args.json_output_file:
      with open(args.json_output_file, 'w') as f:
        f.write(_GetJsonBenchmarkList(possible_browser,
                                      possible_reference_browser,
                                      args.benchmarks, args.num_shards))
    else:
      PrintBenchmarkList(args.benchmarks, possible_browser)
    return 0
167
168
class Run(command_line.OptparseCommand):
  """Run one or more benchmarks (default)"""

  usage = 'benchmark_name [page_set] [<options>]'

  @classmethod
  def CreateParser(cls):
    """Builds an option parser backed by BrowserFinderOptions."""
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, environment):
    benchmark.AddCommandLineArgs(parser)

    # Allow benchmarks to add their own command line options.
    matching_benchmarks = []
    for arg in sys.argv[1:]:
      matching_benchmarks += _MatchBenchmarkName(arg, environment)

    if matching_benchmarks:
      # TODO(dtu): After move to argparse, add command-line args for all
      # benchmarks to subparser. Using subparsers will avoid duplicate
      # arguments.
      matching_benchmark = matching_benchmarks.pop()
      matching_benchmark.AddCommandLineArgs(parser)
      # The benchmark's options override the defaults!
      matching_benchmark.SetArgumentDefaults(parser)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    """Resolves the positional benchmark name and validates arguments.

    Exits the process (status -1) when no benchmark, an unknown benchmark,
    or an ambiguous benchmark name is given.
    """
    all_benchmarks = _Benchmarks(environment)
    if not args.positional_args:
      possible_browser = (
          browser_finder.FindBrowser(args) if args.browser_type else None)
      PrintBenchmarkList(all_benchmarks, possible_browser)
      sys.exit(-1)

    input_benchmark_name = args.positional_args[0]
    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
    if not matching_benchmarks:
      print('No benchmark named "%s".' % input_benchmark_name, file=sys.stderr)
      print(file=sys.stderr)
      most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
          all_benchmarks, input_benchmark_name, lambda x: x.Name())
      if most_likely_matched_benchmarks:
        print('Do you mean any of those benchmarks below?', file=sys.stderr)
        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
      sys.exit(-1)

    if len(matching_benchmarks) > 1:
      print('Multiple benchmarks named "%s".' % input_benchmark_name,
            file=sys.stderr)
      print('Did you mean one of these?', file=sys.stderr)
      print(file=sys.stderr)
      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
      sys.exit(-1)

    benchmark_class = matching_benchmarks.pop()
    if len(args.positional_args) > 1:
      parser.error('Too many arguments.')

    assert issubclass(benchmark_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    benchmark_class.ProcessCommandLineArgs(parser, args)

    # Stash the resolved class on the command class; Run() instantiates it.
    cls._benchmark = benchmark_class

  def Run(self, args):
    # Exit codes above 255 are not portable, so clamp the benchmark result.
    return min(255, self._benchmark().Run(args))
241
242
243def _ScriptName():
244  return os.path.basename(sys.argv[0])
245
246
247def _MatchingCommands(string, commands):
248  return [command for command in commands
249         if command.Name().startswith(string)]
250
@decorators.Cache
def _Benchmarks(environment):
  """Discovers every Benchmark class under the environment's benchmark dirs.

  Cached via decorators.Cache, so discovery runs at most once per
  environment.
  """
  found = []
  for search_dir in environment.benchmark_dirs:
    discovered = discover.DiscoverClasses(
        search_dir, environment.top_level_dir, benchmark.Benchmark,
        index_by_class_name=True)
    found.extend(discovered.values())
  return found
260
def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
  """Returns benchmark classes whose name matches |input_benchmark_name|.

  With exact_matches the result has at most one element; otherwise a fuzzy
  prefix match against the full name and each dot-separated part is used.
  """
  def _Matches(input_string, search_string):
    if search_string.startswith(input_string):
      return True
    return any(part.startswith(input_string)
               for part in search_string.split('.'))

  # Exact matching.
  if exact_matches:
    # Don't add aliases to search dict, only allow exact matching for them.
    aliases = environment.benchmark_aliases
    exact_match = (aliases[input_benchmark_name]
                   if input_benchmark_name in aliases
                   else input_benchmark_name)
    for benchmark_class in _Benchmarks(environment):
      if exact_match == benchmark_class.Name():
        return [benchmark_class]
    return []

  # Fuzzy matching.
  return [benchmark_class for benchmark_class in _Benchmarks(environment)
          if _Matches(input_benchmark_name, benchmark_class.Name())]
286
287
def GetBenchmarkByName(name, environment):
  """Returns the benchmark class named exactly |name|, or None."""
  # With exact_matches, _MatchBenchmarkName returns at most one class.
  matched = _MatchBenchmarkName(name, environment, exact_matches=True)
  return matched[0] if matched else None
294
295
def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
  """Returns a list of all enabled benchmarks in a JSON format expected by
  buildbots.

  JSON format:
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
  # TODO(charliea): Remove this once we have more power perf bots.
  only_run_battor_benchmarks = False
  print('Environment variables: ', os.environ)
  if os.environ.get('BUILDBOT_BUILDERNAME') in GOOD_POWER_PERF_BOT_WHITELIST:
    only_run_battor_benchmarks = True

  # Based on the current timings, we shift the result of the hash function to
  # achieve better load balancing. Those shift values are to be revised when
  # necessary. The shift value is calculated such that the total cycle time
  # is minimized. (Loop-invariant: computed once, not per benchmark.)
  hash_shift = {
    2 : 47,  # for old desktop configurations with 2 slaves
    5 : 56,  # for new desktop configurations with 5 slaves
    21 : 43  # for Android 3 slaves 7 devices configurations
  }
  shift = hash_shift.get(num_shards, 0)

  output = {
    'version': 1,
    'steps': {
    }
  }
  for benchmark_class in benchmark_classes:
    if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
      continue

    base_name = benchmark_class.Name()
    # TODO(charliea): Remove this once we have more power perf bots.
    # Only run battor power benchmarks to reduce the cycle time of this bot.
    # TODO(rnephew): Enable media.* and power.* tests when Mac BattOr issue
    # is solved.
    if only_run_battor_benchmarks and not base_name.startswith('battor'):
      continue
    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                '-v', '--output-format=chartjson', '--upload-results',
                base_name]
    perf_dashboard_id = base_name

    # Hash explicit UTF-8 bytes so the affinity is stable across Python
    # versions (identical digest for the ASCII names used today).
    base_name_hash = hashlib.sha1(base_name.encode('utf-8')).hexdigest()
    device_affinity = (int(base_name_hash, 16) >> shift) % num_shards

    output['steps'][base_name] = {
      'cmd': ' '.join(base_cmd + [
            '--browser=%s' % possible_browser.browser_type]),
      'device_affinity': device_affinity,
      'perf_dashboard_id': perf_dashboard_id,
    }
    if (possible_reference_browser and
        _IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
      output['steps'][base_name + '.reference'] = {
        'cmd': ' '.join(base_cmd + [
              '--browser=reference', '--output-trace-tag=_ref']),
        'device_affinity': device_affinity,
        'perf_dashboard_id': perf_dashboard_id,
      }

  # Make sure that page_cycler_v2.typical_25 is assigned to the same device
  # as page_cycler.typical_25 benchmark.
  # TODO(nednguyen): remove this hack when crbug.com/618156 is resolved.
  if ('page_cycler_v2.typical_25' in output['steps'] and
      'page_cycler.typical_25' in output['steps']):
    output['steps']['page_cycler_v2.typical_25']['device_affinity'] = (
      output['steps']['page_cycler.typical_25']['device_affinity'])

  return json.dumps(output, indent=2, sort_keys=True)
377
378
def main(environment, extra_commands=None, **log_config_kwargs):
  """Parses the command line, dispatches to a command, returns its exit code.

  Args:
    environment: project environment providing benchmark_dirs, client_configs
      and default_chrome_root.
    extra_commands: optional list of additional command classes beyond
      Help/List/Run.
    **log_config_kwargs: forwarded to logging.basicConfig ('level' is
      stripped because the log level is set in browser_options).
  """
  # The log level is set in browser_options.
  log_config_kwargs.pop('level', None)
  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
  logging.basicConfig(**log_config_kwargs)

  ps_util.EnableListingStrayProcessesUponExitHook()

  # Get the command name from the command line.
  if len(sys.argv) > 1 and sys.argv[1] == '--help':
    sys.argv[1] = 'help'

  command_name = 'run'
  for arg in sys.argv[1:]:
    if not arg.startswith('-'):
      command_name = arg
      break

  # TODO(eakuefner): Remove this hack after we port to argparse.
  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
    command_name = 'run'
    sys.argv[2] = '--help'

  if extra_commands is None:
    extra_commands = []
  all_commands = [Help, List, Run] + extra_commands

  # Validate and interpret the command name.
  commands = _MatchingCommands(command_name, all_commands)
  if len(commands) > 1:
    print('"%s" is not a %s command. Did you mean one of these?'
          % (command_name, _ScriptName()), file=sys.stderr)
    for command in commands:
      print('  %-10s %s' % (command.Name(), command.Description()),
            file=sys.stderr)
    return 1
  # An unrecognized first token falls through to the default 'run' command,
  # which treats it as a benchmark name.
  command = commands[0] if commands else Run

  binary_manager.InitDependencyManager(environment.client_configs)

  # Parse and run the command.
  parser = command.CreateParser()
  command.AddCommandLineArgs(parser, environment)

  # Set the default chrome root variable.
  parser.set_defaults(chrome_root=environment.default_chrome_root)

  if isinstance(parser, argparse.ArgumentParser):
    commandline_args = sys.argv[1:]
    options, args = parser.parse_known_args(commandline_args[1:])
    command.ProcessCommandLineArgs(parser, options, args, environment)
  else:
    options, args = parser.parse_args()
    # Drop the command token itself when one was explicitly given.
    if commands:
      args = args[1:]
    options.positional_args = args
    command.ProcessCommandLineArgs(parser, options, environment)

  if command == Help:
    command_instance = command(all_commands)
  else:
    command_instance = command()
  if isinstance(command_instance, command_line.OptparseCommand):
    return command_instance.Run(options)
  else:
    return command_instance.Run(options, args)
449