#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Performance Test Bisect Tool

This script bisects a series of changelists using binary search. It starts at
a bad revision where a performance metric has regressed, and asks for a last
known-good revision. It will then binary search across this revision range by
syncing, building, and running a performance test. If the change is
suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.


An example usage (using svn CLs):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit

Be aware that if you're using the git workflow and specify an svn revision,
the script will attempt to find the git SHA1 where svn changes up to that
revision were merged in.


An example usage (using git hashes):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
-m shutdown/simple-user-quit

"""

import copy
import datetime
import errno
import imp
import math
import optparse
import os
import re
import shlex
import shutil
import StringIO
import subprocess
import sys
import time

import bisect_utils


# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src, which
# needs skia/include and skia/gyp to be updated), specify them in 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision='
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision='
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "platform": 'nt',
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2


def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() +
      depot_info.items())
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
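
# A minimal, illustrative sketch of how an extra depot could be registered at
# runtime via _AddAdditionalDepotInfo(). The depot name, path, and settings
# below are hypothetical and not part of the real configuration:
#
#   _AddAdditionalDepotInfo({
#     'example_depot': {
#       'src': 'src/third_party/example_depot',
#       'recurse': True,
#       'depends': None,
#       'from': ['chromium'],
#     },
#   })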
154
155
156def CalculateTruncatedMean(data_set, truncate_percent):
157  """Calculates the truncated mean of a set of values.
158
159  Args:
160    data_set: Set of values to use in calculation.
161    truncate_percent: The % from the upper/lower portions of the data set to
162        discard, expressed as a value in [0, 1].
163
164  Returns:
165    The truncated mean as a float.
166  """
167  if len(data_set) > 2:
168    data_set = sorted(data_set)
169
170    discard_num_float = len(data_set) * truncate_percent
171    discard_num_int = int(math.floor(discard_num_float))
172    kept_weight = len(data_set) - discard_num_float * 2
173
174    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]
175
176    weight_left = 1.0 - (discard_num_float - discard_num_int)
177
178    if weight_left < 1:
179      # If the % to discard leaves a fractional portion, need to weight those
180      # values.
181      unweighted_vals = data_set[1:len(data_set)-1]
182      weighted_vals = [data_set[0], data_set[len(data_set)-1]]
183      weighted_vals = [w * weight_left for w in weighted_vals]
184      data_set = weighted_vals + unweighted_vals
185  else:
186    kept_weight = len(data_set)
187
188  truncated_mean = reduce(lambda x, y: float(x) + float(y),
189                          data_set) / kept_weight
190
191  return truncated_mean
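
# Example (illustrative): CalculateTruncatedMean([1, 2, 3, 100], 0.25) sorts
# the values, discards 25% from each end (the 1 and the 100), and returns
# (2 + 3) / 2.0 = 2.5. With truncate_percent=0 it is the plain mean.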
192
193
194def CalculateStandardDeviation(v):
195  if len(v) == 1:
196    return 0.0
197
198  mean = CalculateTruncatedMean(v, 0.0)
199  variances = [float(x) - mean for x in v]
200  variances = [x * x for x in variances]
201  variance = reduce(lambda x, y: float(x) + float(y), variances) / (len(v) - 1)
202  std_dev = math.sqrt(variance)
203
204  return std_dev
205
206
207def CalculatePooledStandardError(work_sets):
208  numerator = 0.0
209  denominator1 = 0.0
210  denominator2 = 0.0
211
212  for current_set in work_sets:
213    std_dev = CalculateStandardDeviation(current_set)
214    numerator += (len(current_set) - 1) * std_dev ** 2
215    denominator1 += len(current_set) - 1
216    denominator2 += 1.0 / len(current_set)
217
218  if denominator1:
219    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
220  return 0.0
221
222
223def CalculateStandardError(v):
224  if len(v) <= 1:
225    return 0.0
226
227  std_dev = CalculateStandardDeviation(v)
228
229  return std_dev / math.sqrt(len(v))
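
# Example (illustrative): for v = [10.0, 12.0, 11.0] the sample standard
# deviation is 1.0, so CalculateStandardError(v) returns 1.0 / sqrt(3),
# roughly 0.577.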
230
231
232def IsStringFloat(string_to_check):
233  """Checks whether or not the given string can be converted to a floating
234  point number.
235
236  Args:
237    string_to_check: Input string to check if it can be converted to a float.
238
239  Returns:
240    True if the string can be converted to a float.
241  """
242  try:
243    float(string_to_check)
244
245    return True
246  except ValueError:
247    return False
248
249
250def IsStringInt(string_to_check):
251  """Checks whether or not the given string can be converted to a integer.
252
253  Args:
254    string_to_check: Input string to check if it can be converted to an int.
255
256  Returns:
257    True if the string can be converted to an int.
258  """
259  try:
260    int(string_to_check)
261
262    return True
263  except ValueError:
264    return False
265
266
267def IsWindows():
268  """Checks whether or not the script is running on Windows.
269
270  Returns:
271    True if running on Windows.
272  """
273  return os.name == 'nt'
274
275
276def RunProcess(command):
277  """Run an arbitrary command. If output from the call is needed, use
278  RunProcessAndRetrieveOutput instead.
279
280  Args:
281    command: A list containing the command and args to execute.
282
283  Returns:
284    The return code of the call.
285  """
286  # On Windows, use shell=True to get PATH interpretation.
287  shell = IsWindows()
288  return subprocess.call(command, shell=shell)
289
290
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Run an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (i.e. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: Directory to run the command from, if not the current directory.

  Returns:
    A tuple of the output and return code.
  """
303  # On Windows, use shell=True to get PATH interpretation.
304  shell = IsWindows()
305  proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE, cwd=cwd)
306
307  (output, _) = proc.communicate()
308
309  return (output, proc.returncode)
310
311
312def RunGit(command, cwd=None):
313  """Run a git subcommand, returning its output and return code.
314
315  Args:
316    command: A list containing the args to git.
317
318  Returns:
319    A tuple of the output and return code.
320  """
321  command = ['git'] + command
322
323  return RunProcessAndRetrieveOutput(command, cwd=cwd)
324
325
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output. Asserts if the return code
  of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    The output of the git subcommand.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
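
# Example usage (illustrative):
#   current_sha1 = CheckRunGit(['rev-parse', 'HEAD']).strip()
# which asserts on failure instead of returning an (output, return_code)
# tuple the way RunGit does.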
341
342
343def SetBuildSystemDefault(build_system):
344  """Sets up any environment variables needed to build with the specified build
345  system.
346
347  Args:
348    build_system: A string specifying build system. Currently only 'ninja' or
349        'make' are supported."""
350  if build_system == 'ninja':
351    gyp_var = os.getenv('GYP_GENERATORS')
352
353    if not gyp_var or not 'ninja' in gyp_var:
354      if gyp_var:
355        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
356      else:
357        os.environ['GYP_GENERATORS'] = 'ninja'
358
359      if IsWindows():
360        os.environ['GYP_DEFINES'] = 'component=shared_library '\
361            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
362            'chromium_win_pch=0'
363  elif build_system == 'make':
364    os.environ['GYP_GENERATORS'] = 'make'
365  else:
366    raise RuntimeError('%s build not supported.' % build_system)
367
368
369def BuildWithMake(threads, targets):
370  cmd = ['make', 'BUILDTYPE=Release']
371
372  if threads:
373    cmd.append('-j%d' % threads)
374
375  cmd += targets
376
377  return_code = RunProcess(cmd)
378
379  return not return_code
380
381
382def BuildWithNinja(threads, targets):
383  cmd = ['ninja', '-C', os.path.join('out', 'Release')]
384
385  if threads:
386    cmd.append('-j%d' % threads)
387
388  cmd += targets
389
390  return_code = RunProcess(cmd)
391
392  return not return_code
393
394
395def BuildWithVisualStudio(targets):
396  path_to_devenv = os.path.abspath(
397      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
398  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
399  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]
400
401  for t in targets:
402    cmd.extend(['/Project', t])
403
404  return_code = RunProcess(cmd)
405
406  return not return_code
407
408
409class Builder(object):
410  """Builder is used by the bisect script to build relevant targets and deploy.
411  """
412  def __init__(self, opts):
413    """Performs setup for building with target build system.
414
415    Args:
416        opts: Options parsed from command line.
417    """
418    if IsWindows():
419      if not opts.build_preference:
420        opts.build_preference = 'msvs'
421
422      if opts.build_preference == 'msvs':
423        if not os.getenv('VS100COMNTOOLS'):
424          raise RuntimeError(
425              'Path to visual studio could not be determined.')
426      else:
427        SetBuildSystemDefault(opts.build_preference)
428    else:
429      if not opts.build_preference:
        # GYP_GENERATORS may not be set, so default to an empty string.
        if 'ninja' in os.getenv('GYP_GENERATORS', ''):
431          opts.build_preference = 'ninja'
432        else:
433          opts.build_preference = 'make'
434
435      SetBuildSystemDefault(opts.build_preference)
436
437    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
438      raise RuntimeError('Failed to set platform environment.')
439
440    bisect_utils.RunGClient(['runhooks'])
441
442  @staticmethod
443  def FromOpts(opts):
444    builder = None
445    if opts.target_platform == 'cros':
446      builder = CrosBuilder(opts)
447    elif opts.target_platform == 'android':
448      builder = AndroidBuilder(opts)
449    elif opts.target_platform == 'android-chrome':
450      builder = AndroidChromeBuilder(opts)
451    else:
452      builder = DesktopBuilder(opts)
453    return builder
454
455  def Build(self, depot, opts):
456    raise NotImplementedError()
457
458
459class DesktopBuilder(Builder):
460  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
461  def __init__(self, opts):
462    super(DesktopBuilder, self).__init__(opts)
463
464  def Build(self, depot, opts):
465    """Builds chromium_builder_perf target using options passed into
466    the script.
467
468    Args:
469        depot: Current depot being bisected.
470        opts: The options parsed from the command line.
471
472    Returns:
473        True if build was successful.
474    """
475    targets = ['chromium_builder_perf']
476
477    threads = None
478    if opts.use_goma:
479      threads = 64
480
481    build_success = False
482    if opts.build_preference == 'make':
483      build_success = BuildWithMake(threads, targets)
484    elif opts.build_preference == 'ninja':
485      build_success = BuildWithNinja(threads, targets)
486    elif opts.build_preference == 'msvs':
487      assert IsWindows(), 'msvs is only supported on Windows.'
488      build_success = BuildWithVisualStudio(targets)
489    else:
490      assert False, 'No build system defined.'
491    return build_success
492
493
494class AndroidBuilder(Builder):
495  """AndroidBuilder is used to build on android."""
496  def __init__(self, opts):
497    super(AndroidBuilder, self).__init__(opts)
498
499  def _GetTargets(self):
500    return ['chromium_testshell', 'cc_perftests_apk', 'android_tools']
501
502  def Build(self, depot, opts):
503    """Builds the android content shell and other necessary tools using options
504    passed into the script.
505
506    Args:
507        depot: Current depot being bisected.
508        opts: The options parsed from the command line.
509
510    Returns:
511        True if build was successful.
512    """
513    threads = None
514    if opts.use_goma:
515      threads = 64
516
517    build_success = False
518    if opts.build_preference == 'ninja':
519      build_success = BuildWithNinja(threads, self._GetTargets())
520    else:
521      assert False, 'No build system defined.'
522
523    return build_success
524
525
526class AndroidChromeBuilder(AndroidBuilder):
527  """AndroidBuilder is used to build on android's chrome."""
528  def __init__(self, opts):
529    super(AndroidChromeBuilder, self).__init__(opts)
530
531  def _GetTargets(self):
532    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
533
534
535class CrosBuilder(Builder):
536  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
537  target platform."""
538  def __init__(self, opts):
539    super(CrosBuilder, self).__init__(opts)
540
541  def ImageToTarget(self, opts):
542    """Installs latest image to target specified by opts.cros_remote_ip.
543
544    Args:
545        opts: Program options containing cros_board and cros_remote_ip.
546
547    Returns:
548        True if successful.
549    """
550    try:
551      # Keys will most likely be set to 0640 after wiping the chroot.
552      os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
553      os.chmod(CROS_TEST_KEY_PATH, 0600)
554      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
555             '--remote=%s' % opts.cros_remote_ip,
556             '--board=%s' % opts.cros_board, '--test', '--verbose']
557
558      return_code = RunProcess(cmd)
559      return not return_code
560    except OSError, e:
561      return False
562
563  def BuildPackages(self, opts, depot):
564    """Builds packages for cros.
565
566    Args:
567        opts: Program options containing cros_board.
568        depot: The depot being bisected.
569
570    Returns:
571        True if successful.
572    """
573    cmd = [CROS_SDK_PATH]
574
575    if depot != 'cros':
576      path_to_chrome = os.path.join(os.getcwd(), '..')
577      cmd += ['--chrome_root=%s' % path_to_chrome]
578
579    cmd += ['--']
580
581    if depot != 'cros':
582      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
583
584    cmd += ['BUILDTYPE=Release', './build_packages',
585        '--board=%s' % opts.cros_board]
586    return_code = RunProcess(cmd)
587
588    return not return_code
589
590  def BuildImage(self, opts, depot):
591    """Builds test image for cros.
592
593    Args:
594        opts: Program options containing cros_board.
595        depot: The depot being bisected.
596
597    Returns:
598        True if successful.
599    """
600    cmd = [CROS_SDK_PATH]
601
602    if depot != 'cros':
603      path_to_chrome = os.path.join(os.getcwd(), '..')
604      cmd += ['--chrome_root=%s' % path_to_chrome]
605
606    cmd += ['--']
607
608    if depot != 'cros':
609      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
610
611    cmd += ['BUILDTYPE=Release', '--', './build_image',
612        '--board=%s' % opts.cros_board, 'test']
613
614    return_code = RunProcess(cmd)
615
616    return not return_code
617
618  def Build(self, depot, opts):
619    """Builds targets using options passed into the script.
620
621    Args:
622        depot: Current depot being bisected.
623        opts: The options parsed from the command line.
624
625    Returns:
626        True if build was successful.
627    """
628    if self.BuildPackages(opts, depot):
629      if self.BuildImage(opts, depot):
630        return self.ImageToTarget(opts)
631    return False
632
633
634class SourceControl(object):
635  """SourceControl is an abstraction over the underlying source control
636  system used for chromium. For now only git is supported, but in the
637  future, the svn workflow could be added as well."""
638  def __init__(self):
639    super(SourceControl, self).__init__()
640
641  def SyncToRevisionWithGClient(self, revision):
642    """Uses gclient to sync to the specified revision.
643
644    ie. gclient sync --revision <revision>
645
646    Args:
647      revision: The git SHA1 or svn CL (depending on workflow).
648
649    Returns:
650      The return code of the call.
651    """
652    return bisect_utils.RunGClient(['sync', '--revision',
653        revision, '--verbose', '--nohooks', '--reset', '--force'])
654
655  def SyncToRevisionWithRepo(self, timestamp):
656    """Uses repo to sync all the underlying git depots to the specified
657    time.
658
659    Args:
660      timestamp: The unix timestamp to sync to.
661
662    Returns:
663      The return code of the call.
664    """
665    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
666
667
668class GitSourceControl(SourceControl):
669  """GitSourceControl is used to query the underlying source control. """
670  def __init__(self, opts):
671    super(GitSourceControl, self).__init__()
672    self.opts = opts
673
674  def IsGit(self):
675    return True
676
677  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
678    """Retrieves a list of revisions between |revision_range_start| and
679    |revision_range_end|.
680
681    Args:
682      revision_range_end: The SHA1 for the end of the range.
683      revision_range_start: The SHA1 for the beginning of the range.
684
685    Returns:
686      A list of the revisions between |revision_range_start| and
687      |revision_range_end| (inclusive).
688    """
689    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
690    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
691    log_output = CheckRunGit(cmd, cwd=cwd)
692
693    revision_hash_list = log_output.split()
694    revision_hash_list.append(revision_range_start)
695
696    return revision_hash_list
697
698  def SyncToRevision(self, revision, sync_client=None):
699    """Syncs to the specified revision.
700
701    Args:
702      revision: The revision to sync to.
      sync_client: The sync client to use ('gclient' or 'repo'), or None to
        sync with git directly.
705
706    Returns:
707      True if successful.
708    """
709
710    if not sync_client:
711      results = RunGit(['checkout', revision])[1]
712    elif sync_client == 'gclient':
713      results = self.SyncToRevisionWithGClient(revision)
714    elif sync_client == 'repo':
715      results = self.SyncToRevisionWithRepo(revision)
716
717    return not results
718
719  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
720    """If an SVN revision is supplied, try to resolve it to a git SHA1.
721
722    Args:
723      revision_to_check: The user supplied revision string that may need to be
724        resolved to a git SHA1.
725      depot: The depot the revision_to_check is from.
726      search: The number of changelists to try if the first fails to resolve
727        to a git hash. If the value is negative, the function will search
728        backwards chronologically, otherwise it will search forward.
729
730    Returns:
731      A string containing a git SHA1 hash, otherwise None.
732    """
733    # Android-chrome is git only, so no need to resolve this to anything else.
734    if depot == 'android-chrome':
735      return revision_to_check
736
737    if depot != 'cros':
738      if not IsStringInt(revision_to_check):
739        return revision_to_check
740
741      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
742
743      if depot != 'chromium':
744        depot_svn = DEPOT_DEPS_NAME[depot]['svn']
745
746      svn_revision = int(revision_to_check)
747      git_revision = None
748
749      if search > 0:
750        search_range = xrange(svn_revision, svn_revision + search, 1)
751      else:
752        search_range = xrange(svn_revision, svn_revision + search, -1)
753
754      for i in search_range:
755        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
756        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
757               'origin/master']
758
759        (log_output, return_code) = RunGit(cmd, cwd=cwd)
760
761        assert not return_code, 'An error occurred while running'\
762                                ' "git %s"' % ' '.join(cmd)
763
764        if not return_code:
765          log_output = log_output.strip()
766
767          if log_output:
768            git_revision = log_output
769
770            break
771
772      return git_revision
773    else:
774      if IsStringInt(revision_to_check):
775        return int(revision_to_check)
776      else:
777        cwd = os.getcwd()
778        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
779            'chromiumos-overlay'))
780        pattern = CROS_VERSION_PATTERN % revision_to_check
781        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]
782
783        git_revision = None
784
785        log_output = CheckRunGit(cmd, cwd=cwd)
786        if log_output:
787          git_revision = log_output
788          git_revision = int(log_output.strip())
789        os.chdir(cwd)
790
791        return git_revision
792
793  def IsInProperBranch(self):
794    """Confirms they're in the master branch for performing the bisection.
795    This is needed or gclient will fail to sync properly.
796
797    Returns:
798      True if the current branch on src is 'master'
799    """
800    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
801    log_output = CheckRunGit(cmd)
802    log_output = log_output.strip()
803
804    return log_output == "master"
805
806  def SVNFindRev(self, revision):
807    """Maps directly to the 'git svn find-rev' command.
808
809    Args:
810      revision: The git SHA1 to use.
811
812    Returns:
813      An integer changelist #, otherwise None.
814    """
815
816    cmd = ['svn', 'find-rev', revision]
817
818    output = CheckRunGit(cmd)
819    svn_revision = output.strip()
820
821    if IsStringInt(svn_revision):
822      return int(svn_revision)
823
824    return None
825
826  def QueryRevisionInfo(self, revision, cwd=None):
827    """Gathers information on a particular revision, such as author's name,
828    email, subject, and date.
829
830    Args:
831      revision: Revision you want to gather information on.
832    Returns:
833      A dict in the following format:
834      {
835        'author': %s,
836        'email': %s,
837        'date': %s,
838        'subject': %s,
839        'body': %s,
840      }
841    """
842    commit_info = {}
843
844    formats = ['%cN', '%cE', '%s', '%cD', '%b']
845    targets = ['author', 'email', 'subject', 'date', 'body']
846
847    for i in xrange(len(formats)):
848      cmd = ['log', '--format=%s' % formats[i], '-1', revision]
849      output = CheckRunGit(cmd, cwd=cwd)
850      commit_info[targets[i]] = output.rstrip()
851
852    return commit_info
853
854  def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
855    """Performs a checkout on a file at the given revision.
856
857    Returns:
858      True if successful.
859    """
860    return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]
861
  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', file_name])

    return not RunGit(['checkout', file_name])[1]
872
873  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
874    """Returns a list of commits that modified this file.
875
876    Args:
877        filename: Name of file.
878        revision_start: Start of revision range.
879        revision_end: End of revision range.
880
881    Returns:
882        Returns a list of commits that touched this file.
883    """
884    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
885           filename]
886    output = CheckRunGit(cmd)
887
888    return [o for o in output.split('\n') if o]


class BisectPerformanceMetrics(object):
  """BisectPerformanceMetrics performs a bisection against a range of
  revisions to narrow down where performance regressions may have
  occurred."""
894
895  def __init__(self, source_control, opts):
896    super(BisectPerformanceMetrics, self).__init__()
897
898    self.opts = opts
899    self.source_control = source_control
900    self.src_cwd = os.getcwd()
901    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
902    self.depot_cwd = {}
903    self.cleanup_commands = []
904    self.warnings = []
905    self.builder = Builder.FromOpts(opts)
906
907    # This always starts true since the script grabs latest first.
908    self.was_blink = True
909
910    for d in DEPOT_NAMES:
911      # The working directory of each depot is just the path to the depot, but
912      # since we're already in 'src', we can skip that part.
913
914      self.depot_cwd[d] = os.path.join(
915          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
916
917  def PerformCleanup(self):
918    """Performs cleanup when script is finished."""
919    os.chdir(self.src_cwd)
920    for c in self.cleanup_commands:
921      if c[0] == 'mv':
922        shutil.move(c[1], c[2])
923      else:
924        assert False, 'Invalid cleanup command.'
925
926  def GetRevisionList(self, depot, bad_revision, good_revision):
927    """Retrieves a list of all the commits between the bad revision and
928    last known good revision."""
929
930    revision_work_list = []
931
932    if depot == 'cros':
933      revision_range_start = good_revision
934      revision_range_end = bad_revision
935
936      cwd = os.getcwd()
937      self.ChangeToDepotWorkingDirectory('cros')
938
939      # Print the commit timestamps for every commit in the revision time
940      # range. We'll sort them and bisect by that. There is a remote chance that
941      # 2 (or more) commits will share the exact same timestamp, but it's
942      # probably safe to ignore that case.
943      cmd = ['repo', 'forall', '-c',
944          'git log --format=%%ct --before=%d --after=%d' % (
945          revision_range_end, revision_range_start)]
946      (output, return_code) = RunProcessAndRetrieveOutput(cmd)
947
948      assert not return_code, 'An error occurred while running'\
949                              ' "%s"' % ' '.join(cmd)
950
951      os.chdir(cwd)
952
953      revision_work_list = list(set(
954          [int(o) for o in output.split('\n') if IsStringInt(o)]))
955      revision_work_list = sorted(revision_work_list, reverse=True)
956    else:
957      cwd = self._GetDepotDirectory(depot)
958      revision_work_list = self.source_control.GetRevisionList(bad_revision,
959          good_revision, cwd=cwd)
960
961    return revision_work_list
962
963  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
964    svn_revision = self.source_control.SVNFindRev(revision)
965
966    if IsStringInt(svn_revision):
967      # V8 is tricky to bisect, in that there are only a few instances when
968      # we can dive into bleeding_edge and get back a meaningful result.
969      # Try to detect a V8 "business as usual" case, which is when:
970      #  1. trunk revision N has description "Version X.Y.Z"
971      #  2. bleeding_edge revision (N-1) has description "Prepare push to
972      #     trunk. Now working on X.Y.(Z+1)."
973      v8_dir = self._GetDepotDirectory('v8')
974      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')
975
976      revision_info = self.source_control.QueryRevisionInfo(revision,
977          cwd=v8_dir)
978
979      version_re = re.compile("Version (?P<values>[0-9,.]+)")
980
981      regex_results = version_re.search(revision_info['subject'])
982
983      if regex_results:
984        version = regex_results.group('values')
985
986        git_revision = self.source_control.ResolveToRevision(
987            int(svn_revision) - 1, 'v8_bleeding_edge', -1,
988            cwd=v8_bleeding_edge_dir)
989
990        if git_revision:
991          revision_info = self.source_control.QueryRevisionInfo(git_revision,
992              cwd=v8_bleeding_edge_dir)
993
994          if 'Prepare push to trunk' in revision_info['subject']:
995            return git_revision
996    return None
997
998  def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
999    cwd = self._GetDepotDirectory('v8')
1000    cmd = ['log', '--format=%ct', '-1', revision]
1001    output = CheckRunGit(cmd, cwd=cwd)
1002    commit_time = int(output)
1003    commits = []
1004
1005    if search_forward:
1006      cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1007          'origin/master']
1008      output = CheckRunGit(cmd, cwd=cwd)
1009      output = output.split()
1010      commits = output
1011      commits = reversed(commits)
1012    else:
1013      cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1014          'origin/master']
1015      output = CheckRunGit(cmd, cwd=cwd)
1016      output = output.split()
1017      commits = output
1018
1019    bleeding_edge_revision = None
1020
1021    for c in commits:
1022      bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1023      if bleeding_edge_revision:
1024        break
1025
1026    return bleeding_edge_revision
1027
1028  def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
1029    """Parses the DEPS file to determine WebKit/v8/etc... versions.
1030
1031    Returns:
1032      A dict in the format {depot:revision} if successful, otherwise None.
1033    """
1034
1035    cwd = os.getcwd()
1036    self.ChangeToDepotWorkingDirectory(depot)
1037
1038    results = {}
1039
1040    if depot == 'chromium' or depot == 'android-chrome':
1041      locals = {'Var': lambda _: locals["vars"][_],
1042                'From': lambda *args: None}
1043      execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)
1044
1045      os.chdir(cwd)
1046
1047      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1048
1049      for d in DEPOT_NAMES:
1050        if DEPOT_DEPS_NAME[d].has_key('platform'):
1051          if DEPOT_DEPS_NAME[d]['platform'] != os.name:
1052            continue
1053
1054        if (DEPOT_DEPS_NAME[d]['recurse'] and
1055            depot in DEPOT_DEPS_NAME[d]['from']):
1056          if (locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']) or
1057              locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old'])):
1058            if locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']):
1059              re_results = rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src']])
1060              self.depot_cwd[d] =\
1061                  os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1062            elif locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old']):
1063              re_results =\
1064                  rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src_old']])
1065              self.depot_cwd[d] =\
1066                  os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src_old'][4:])
1067
1068            if re_results:
1069              results[d] = re_results.group('revision')
1070            else:
1071              print 'Couldn\'t parse revision for %s.' % d
1072              print
1073              return None
1074          else:
1075            print 'Couldn\'t find %s while parsing .DEPS.git.' % d
1076            print
1077            return None
1078    elif depot == 'cros':
1079      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1080             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1081             CROS_CHROMEOS_PATTERN]
1082      (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1083
1084      assert not return_code, 'An error occurred while running'\
1085                              ' "%s"' % ' '.join(cmd)
1086
1087      if len(output) > CROS_CHROMEOS_PATTERN:
1088        output = output[len(CROS_CHROMEOS_PATTERN):]
1089
1090      if len(output) > 1:
1091        output = output.split('_')[0]
1092
1093        if len(output) > 3:
1094          contents = output.split('.')
1095
1096          version = contents[2]
1097
1098          if contents[3] != '0':
1099            warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' %\
1100                (version, contents[3], version)
1101            if not warningText in self.warnings:
1102              self.warnings.append(warningText)
1103
          cwd = os.getcwd()
          self.ChangeToDepotWorkingDirectory('chromium')
          log_output = CheckRunGit(['log', '-1', '--format=%H',
              '--author=chrome-release@google.com', '--grep=to %s' % version,
              'origin/master'])
          os.chdir(cwd)

          results['chromium'] = log_output.strip()
1112    elif depot == 'v8':
1113      # We can't try to map the trunk revision to bleeding edge yet, because
1114      # we don't know which direction to try to search in. Have to wait until
1115      # the bisect has narrowed the results down to 2 v8 rolls.
1116      results['v8_bleeding_edge'] = None
1117
1118    return results
1119
1120  def BuildCurrentRevision(self, depot):
1121    """Builds chrome and performance_ui_tests on the current revision.
1122
1123    Returns:
1124      True if the build was successful.
1125    """
1126    if self.opts.debug_ignore_build:
1127      return True
1128
1129    cwd = os.getcwd()
1130    os.chdir(self.src_cwd)
1131
1132    build_success = self.builder.Build(depot, self.opts)
1133
1134    os.chdir(cwd)
1135
1136    return build_success
1137
1138  def RunGClientHooks(self):
1139    """Runs gclient with runhooks command.
1140
1141    Returns:
1142      True if gclient reports no errors.
1143    """
1144
1145    if self.opts.debug_ignore_build:
1146      return True
1147
1148    return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1149
1150  def TryParseHistogramValuesFromOutput(self, metric, text):
1151    """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1152
1153    Args:
1154      metric: The metric as a list of [<trace>, <value>] strings.
1155      text: The text to parse the metric values from.
1156
1157    Returns:
1158      A list of floating point numbers found.
1159    """
1160    metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1161
1162    text_lines = text.split('\n')
1163    values_list = []
1164
1165    for current_line in text_lines:
1166      if metric_formatted in current_line:
1167        current_line = current_line[len(metric_formatted):]
1168
1169        try:
1170          histogram_values = eval(current_line)
1171
1172          for b in histogram_values['buckets']:
1173            average_for_bucket = float(b['high'] + b['low']) * 0.5
1174            # Extends the list with N-elements with the average for that bucket.
1175            values_list.extend([average_for_bucket] * b['count'])
1176        except:
1177          pass
1178
1179    return values_list
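
  # Example (illustrative): for metric ['my_graph', 'my_trace'], a line such
  # as
  #   HISTOGRAM my_graph: my_trace= {"buckets": [{"low": 1, "high": 3,
  #   "count": 2}]}
  # contributes the bucket midpoint 2.0 twice, i.e. [2.0, 2.0]. The graph and
  # trace names here are hypothetical.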
1180
1181  def TryParseResultValuesFromOutput(self, metric, text):
1182    """Attempts to parse a metric in the format RESULT <graph: <trace>.
1183
1184    Args:
1185      metric: The metric as a list of [<trace>, <value>] strings.
1186      text: The text to parse the metric values from.
1187
1188    Returns:
1189      A list of floating point numbers found.
1190    """
1191    # Format is: RESULT <graph>: <trace>= <value> <units>
1192    metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1193
1194    text_lines = text.split('\n')
1195    values_list = []
1196
1197    for current_line in text_lines:
1198      # Parse the output from the performance test for the metric we're
1199      # interested in.
1200      metric_re = metric_formatted +\
1201                  "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1202      metric_re = re.compile(metric_re)
1203      regex_results = metric_re.search(current_line)
1204
1205      if not regex_results is None:
1206        values_list += [regex_results.group('values')]
1207      else:
1208        metric_re = metric_formatted +\
1209                    "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1210        metric_re = re.compile(metric_re)
1211        regex_results = metric_re.search(current_line)
1212
1213        if not regex_results is None:
1214          metric_values = regex_results.group('values')
1215
1216          values_list += metric_values.split(',')
1217
1218    values_list = [float(v) for v in values_list if IsStringFloat(v)]
1219
1220    # If the metric is times/t, we need to sum the timings in order to get
1221    # similar regression results as the try-bots.
1222    metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1223        ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1224
1225    if metric in metrics_to_sum:
1226      if values_list:
1227        values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1228
1229    return values_list
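
  # Example (illustrative): for metric ['times', 't'], a line such as
  #   RESULT times: t= [11.5,12.0,12.5] ms
  # yields [11.5, 12.0, 12.5]; because ['times', 't'] is in metrics_to_sum,
  # those values are then collapsed into their sum, [36.0].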
1230
1231  def ParseMetricValuesFromOutput(self, metric, text):
1232    """Parses output from performance_ui_tests and retrieves the results for
1233    a given metric.
1234
1235    Args:
1236      metric: The metric as a list of [<trace>, <value>] strings.
1237      text: The text to parse the metric values from.
1238
1239    Returns:
1240      A list of floating point numbers found.
1241    """
1242    metric_values = self.TryParseResultValuesFromOutput(metric, text)
1243
1244    if not metric_values:
1245      metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
1246
1247    return metric_values
1248
1249  def _GenerateProfileIfNecessary(self, command_args):
1250    """Checks the command line of the performance test for dependencies on
1251    profile generation, and runs tools/perf/generate_profile as necessary.
1252
1253    Args:
1254      command_args: Command line being passed to performance test, as a list.
1255
1256    Returns:
1257      False if profile generation was necessary and failed, otherwise True.
1258    """
1259
1260    if '--profile-dir' in ' '.join(command_args):
1261      # If we were using python 2.7+, we could just use the argparse
1262      # module's parse_known_args to grab --profile-dir. Since some of the
1263      # bots still run 2.6, have to grab the arguments manually.
1264      arg_dict = {}
1265      args_to_parse = ['--profile-dir', '--browser']
1266
1267      for arg_to_parse in args_to_parse:
1268        for i, current_arg in enumerate(command_args):
1269          if arg_to_parse in current_arg:
1270            current_arg_split = current_arg.split('=')
1271
1272            # Check 2 cases, --arg=<val> and --arg <val>
1273            if len(current_arg_split) == 2:
1274              arg_dict[arg_to_parse] = current_arg_split[1]
1275            elif i + 1 < len(command_args):
1276              arg_dict[arg_to_parse] = command_args[i+1]
1277
1278      path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
1279
1280      if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
1281        profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
1282        return not RunProcess(['python', path_to_generate,
1283            '--profile-type-to-generate', profile_type,
1284            '--browser', arg_dict['--browser'], '--output-dir', profile_path])
1285      return False
1286    return True
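
  # Example (illustrative): for a command line containing the hypothetical
  # arguments "--browser=release --profile-dir=out/profiles/small_profile",
  # this runs tools/perf/generate_profile with --profile-type-to-generate
  # small_profile, --browser release and --output-dir out/profiles.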
1287
1288  def RunPerformanceTestAndParseResults(self, command_to_run, metric,
1289      reset_on_first_run=False, upload_on_last_run=False, results_label=None):
1290    """Runs a performance test on the current revision by executing the
1291    'command_to_run' and parses the results.
1292
1293    Args:
1294      command_to_run: The command to be run to execute the performance test.
1295      metric: The metric to parse out from the results of the performance test.
1296
    Returns:
      On success, a tuple of a dict of the aggregated metric values (mean,
      std_err, std_dev and the raw values), a success code of 0, and the
      output of the test runs. On failure, a tuple of an error message and a
      failure code.
1300    """
1301
1302    if self.opts.debug_ignore_perf_test:
1303      return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)
1304
1305    if IsWindows():
1306      command_to_run = command_to_run.replace('/', r'\\')
1307
1308    args = shlex.split(command_to_run)
1309
1310    if not self._GenerateProfileIfNecessary(args):
1311      return ('Failed to generate profile for performance test.', -1)
1312
1313    # If running a telemetry test for cros, insert the remote ip, and
1314    # identity parameters.
1315    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
1316    if self.opts.target_platform == 'cros' and is_telemetry:
1317      args.append('--remote=%s' % self.opts.cros_remote_ip)
1318      args.append('--identity=%s' % CROS_TEST_KEY_PATH)
1319
1320    cwd = os.getcwd()
1321    os.chdir(self.src_cwd)
1322
1323    start_time = time.time()
1324
1325    metric_values = []
1326    output_of_all_runs = ''
1327    for i in xrange(self.opts.repeat_test_count):
1328      # Can ignore the return code since if the tests fail, it won't return 0.
1329      try:
1330        current_args = copy.copy(args)
1331        if is_telemetry:
1332          if i == 0 and reset_on_first_run:
1333            current_args.append('--reset-results')
1334          elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
1335            current_args.append('--upload-results')
1336          if results_label:
1337            current_args.append('--results-label=%s' % results_label)
1338        (output, return_code) = RunProcessAndRetrieveOutput(current_args)
1339      except OSError, e:
1340        if e.errno == errno.ENOENT:
1341          err_text  = ("Something went wrong running the performance test. "
1342              "Please review the command line:\n\n")
1343          if 'src/' in ' '.join(args):
1344            err_text += ("Check that you haven't accidentally specified a path "
1345                "with src/ in the command.\n\n")
1346          err_text += ' '.join(args)
1347          err_text += '\n'
1348
1349          return (err_text, -1)
1350        raise
1351
1352      output_of_all_runs += output
1353      if self.opts.output_buildbot_annotations:
1354        print output
1355
1356      metric_values += self.ParseMetricValuesFromOutput(metric, output)
1357
1358      elapsed_minutes = (time.time() - start_time) / 60.0
1359
1360      if elapsed_minutes >= self.opts.max_time_minutes or not metric_values:
1361        break
1362
1363    os.chdir(cwd)
1364
1365    # Need to get the average value if there were multiple values.
1366    if metric_values:
1367      truncated_mean = CalculateTruncatedMean(metric_values,
1368          self.opts.truncate_percent)
1369      standard_err = CalculateStandardError(metric_values)
1370      standard_dev = CalculateStandardDeviation(metric_values)
1371
1372      values = {
1373        'mean': truncated_mean,
1374        'std_err': standard_err,
1375        'std_dev': standard_dev,
1376        'values': metric_values,
1377      }
1378
1379      print 'Results of performance test: %12f %12f' % (
1380          truncated_mean, standard_err)
1381      print
1382      return (values, 0, output_of_all_runs)
1383    else:
1384      return ('Invalid metric specified, or no values returned from '
1385          'performance test.', -1, output_of_all_runs)
1386
1387  def FindAllRevisionsToSync(self, revision, depot):
1388    """Finds all dependant revisions and depots that need to be synced for a
1389    given revision. This is only useful in the git workflow, as an svn depot
1390    may be split into multiple mirrors.
1391
1392    ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
1393    skia/include. To sync skia/src properly, one has to find the proper
1394    revisions in skia/gyp and skia/include.
1395
1396    Args:
1397      revision: The revision to sync to.
1398      depot: The depot in use at the moment (probably skia).
1399
1400    Returns:
1401      A list of [depot, revision] pairs that need to be synced.
1402    """
1403    revisions_to_sync = [[depot, revision]]
1404
1405    is_base = ((depot == 'chromium') or (depot == 'cros') or
1406        (depot == 'android-chrome'))
1407
1408    # Some SVN depots were split into multiple git depots, so we need to
1409    # figure out for each mirror which git revision to grab. There's no
    # guarantee that the SVN revision will exist for each of the dependent
1411    # depots, so we have to grep the git logs and grab the next earlier one.
1412    if not is_base and\
1413       DEPOT_DEPS_NAME[depot]['depends'] and\
1414       self.source_control.IsGit():
1415      svn_rev = self.source_control.SVNFindRev(revision)
1416
1417      for d in DEPOT_DEPS_NAME[depot]['depends']:
1418        self.ChangeToDepotWorkingDirectory(d)
1419
1420        dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
1421
1422        if dependant_rev:
1423          revisions_to_sync.append([d, dependant_rev])
1424
1425      num_resolved = len(revisions_to_sync)
1426      num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
1427
1428      self.ChangeToDepotWorkingDirectory(depot)
1429
1430      if not ((num_resolved - 1) == num_needed):
1431        return None
1432
1433    return revisions_to_sync
1434
1435  def PerformPreBuildCleanup(self):
1436    """Performs necessary cleanup between runs."""
1437    print 'Cleaning up between runs.'
1438    print
1439
1440    # Having these pyc files around between runs can confuse the
1441    # perf tests and cause them to crash.
1442    for (path, dir, files) in os.walk(self.src_cwd):
1443      for cur_file in files:
1444        if cur_file.endswith('.pyc'):
1445          path_to_file = os.path.join(path, cur_file)
1446          os.remove(path_to_file)
1447
1448  def PerformWebkitDirectoryCleanup(self, revision):
1449    """If the script is switching between Blink and WebKit during bisect,
    it's faster to just delete the directory rather than leave it up to git
1451    to sync.
1452
1453    Returns:
1454      True if successful.
1455    """
1456    if not self.source_control.CheckoutFileAtRevision(
1457        bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
1458      return False
1459
1460    cwd = os.getcwd()
1461    os.chdir(self.src_cwd)
1462
1463    is_blink = bisect_utils.IsDepsFileBlink()
1464
1465    os.chdir(cwd)
1466
1467    if not self.source_control.RevertFileToHead(
1468        bisect_utils.FILE_DEPS_GIT):
1469      return False
1470
1471    if self.was_blink != is_blink:
1472      self.was_blink = is_blink
1473      return bisect_utils.RemoveThirdPartyWebkitDirectory()
1474    return True
1475
1476  def PerformCrosChrootCleanup(self):
1477    """Deletes the chroot.
1478
1479    Returns:
1480        True if successful.
1481    """
1482    cwd = os.getcwd()
1483    self.ChangeToDepotWorkingDirectory('cros')
1484    cmd = [CROS_SDK_PATH, '--delete']
1485    return_code = RunProcess(cmd)
1486    os.chdir(cwd)
1487    return not return_code
1488
1489  def CreateCrosChroot(self):
1490    """Creates a new chroot.
1491
1492    Returns:
1493        True if successful.
1494    """
1495    cwd = os.getcwd()
1496    self.ChangeToDepotWorkingDirectory('cros')
1497    cmd = [CROS_SDK_PATH, '--create']
1498    return_code = RunProcess(cmd)
1499    os.chdir(cwd)
1500    return not return_code
1501
1502  def PerformPreSyncCleanup(self, revision, depot):
1503    """Performs any necessary cleanup before syncing.
1504
1505    Returns:
1506      True if successful.
1507    """
1508    if depot == 'chromium':
1509      if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
1510        return False
1511      return self.PerformWebkitDirectoryCleanup(revision)
1512    elif depot == 'cros':
1513      return self.PerformCrosChrootCleanup()
1514    return True
1515
1516  def RunPostSync(self, depot):
1517    """Performs any work after syncing.
1518
1519    Returns:
1520      True if successful.
1521    """
1522    if self.opts.target_platform == 'android':
1523      if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
1524          path_to_src=self.src_cwd):
1525        return False
1526
1527    if depot == 'cros':
1528      return self.CreateCrosChroot()
1529    else:
1530      return self.RunGClientHooks()
1531    return True
1532
1533  def ShouldSkipRevision(self, depot, revision):
1534    """Some commits can be safely skipped (such as a DEPS roll), since the tool
1535    is git based those changes would have no effect.
1536
1537    Args:
1538      depot: The depot being bisected.
1539      revision: Current revision we're synced to.
1540
1541    Returns:
1542      True if we should skip building/testing this revision.
1543    """
1544    if depot == 'chromium':
1545      if self.source_control.IsGit():
1546        cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
1547        output = CheckRunGit(cmd)
1548
1549        files = output.splitlines()
1550
1551        if len(files) == 1 and files[0] == 'DEPS':
1552          return True
1553
1554    return False
1555
1556  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
1557      skippable=False):
1558    """Performs a full sync/build/run of the specified revision.
1559
1560    Args:
1561      revision: The revision to sync to.
1562      depot: The depot that's being used at the moment (src, webkit, etc.)
1563      command_to_run: The command to execute the performance test.
1564      metric: The performance metric being tested.
1565
1566    Returns:
1567      On success, a tuple containing the results of the performance test.
1568      Otherwise, a tuple with the error message.
1569    """
1570    sync_client = None
1571    if depot == 'chromium' or depot == 'android-chrome':
1572      sync_client = 'gclient'
1573    elif depot == 'cros':
1574      sync_client = 'repo'
1575
1576    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
1577
1578    if not revisions_to_sync:
1579      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)
1580
1581    if not self.PerformPreSyncCleanup(revision, depot):
1582      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
1583
1584    success = True
1585
1586    if not self.opts.debug_ignore_sync:
1587      for r in revisions_to_sync:
1588        self.ChangeToDepotWorkingDirectory(r[0])
1589
1590        if sync_client:
1591          self.PerformPreBuildCleanup()
1592
1593        # If you're using gclient to sync, you need to specify the depot you
1594        # want so that all the dependencies sync properly as well.
1595        # ie. gclient sync src@<SHA1>
1596        current_revision = r[1]
1597        if sync_client == 'gclient':
1598          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
1599              current_revision)
1600        if not self.source_control.SyncToRevision(current_revision,
1601            sync_client):
1602          success = False
1603
1604          break
1605
1606    if success:
1607      success = self.RunPostSync(depot)
1608
1609      if success:
1610        if skippable and self.ShouldSkipRevision(depot, revision):
1611          return ('Skipped revision: [%s]' % str(revision),
1612              BUILD_RESULT_SKIPPED)
1613
1614        start_build_time = time.time()
1615        if self.BuildCurrentRevision(depot):
1616          after_build_time = time.time()
1617          results = self.RunPerformanceTestAndParseResults(command_to_run,
1618                                                           metric)
1619
1620          if results[1] == 0:
1621            external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
1622                depot, revision)
1623
1624            if not external_revisions is None:
1625              return (results[0], results[1], external_revisions,
1626                  time.time() - after_build_time, time.time() -
1627                  start_build_time)
1628            else:
1629              return ('Failed to parse DEPS file for external revisions.',
1630                  BUILD_RESULT_FAIL)
1631          else:
1632            return results
1633        else:
1634          return ('Failed to build revision: [%s]' % (str(revision, )),
1635              BUILD_RESULT_FAIL)
1636      else:
1637        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
1638    else:
1639      return ('Failed to sync revision: [%s]' % (str(revision, )),
1640          BUILD_RESULT_FAIL)
1641
1642  def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
1643    """Given known good and bad values, decide if the current_value passed
1644    or failed.
1645
1646    Args:
1647      current_value: The value of the metric being checked.
1648      known_bad_value: The reference value for a "failed" run.
1649      known_good_value: The reference value for a "passed" run.
1650
1651    Returns:
1652      True if the current_value is closer to the known_good_value than the
1653      known_bad_value.
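
    Example (illustrative values only):
      With current_value {'mean': 2.2}, known_good_value {'mean': 2.0} and
      known_bad_value {'mean': 3.0}, the distances are 0.2 vs. 0.8, so the
      run is treated as passed.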
1654    """
1655    dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
1656    dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
1657
1658    return dist_to_good_value < dist_to_bad_value
1659
1660  def _GetDepotDirectory(self, depot_name):
1661    if depot_name == 'chromium':
1662      return self.src_cwd
1663    elif depot_name == 'cros':
1664      return self.cros_cwd
1665    elif depot_name in DEPOT_NAMES:
1666      return self.depot_cwd[depot_name]
1667    else:
1668      assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
1669                    ' was added without proper support?' %\
1670                    (depot_name,)
1671
1672  def ChangeToDepotWorkingDirectory(self, depot_name):
1673    """Given a depot, changes to the appropriate working directory.
1674
1675    Args:
1676      depot_name: The name of the depot (see DEPOT_NAMES).
1677    """
1678    os.chdir(self._GetDepotDirectory(depot_name))
1679
1680  def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
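    # Map the trunk revisions at both ends of the bisect range onto
    # bleeding_edge, searching forward from the earliest revision and
    # backward from the latest one.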
1681    r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
1682        search_forward=True)
1683    r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
1684        search_forward=False)
1685    min_revision_data['external']['v8_bleeding_edge'] = r1
1686    max_revision_data['external']['v8_bleeding_edge'] = r2
1687
1688    if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
1689            min_revision_data['revision']) or
1690        not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
1691            max_revision_data['revision'])):
1692      self.warnings.append('Trunk revisions in V8 did not map directly to '
1693          'bleeding_edge. Attempted to expand the range to find V8 rolls which '
1694          'did map directly to bleeding_edge revisions, but results might not '
1695          'be valid.')
1696
1697  def _FindNextDepotToBisect(self, current_depot, current_revision,
1698      min_revision_data, max_revision_data):
1699    """Given the state of the bisect, decides which depot the script should
1700    dive into next (if any).
1701
1702    Args:
1703      current_depot: Current depot being bisected.
1704      current_revision: Current revision synced to.
1705      min_revision_data: Data about the earliest revision in the bisect range.
1706      max_revision_data: Data about the latest revision in the bisect range.
1707
1708    Returns:
1709      The depot to bisect next, or None.
1710    """
1711    external_depot = None
1712    for next_depot in DEPOT_NAMES:
      if 'platform' in DEPOT_DEPS_NAME[next_depot]:
1714        if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
1715          continue
1716
1717      if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
1718          min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
1719        continue
1720
1721      if current_depot == 'v8':
1722        # We grab the bleeding_edge info here rather than earlier because we
1723        # finally have the revision range. From that we can search forwards and
1724        # backwards to try to match trunk revisions to bleeding_edge.
1725        self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
1726
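      # If the external depot pinned the same revision at both ends of the
      # range, there is nothing to bisect inside it.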
1727      if (min_revision_data['external'][next_depot] ==
1728          max_revision_data['external'][next_depot]):
1729        continue
1730
1731      if (min_revision_data['external'][next_depot] and
1732          max_revision_data['external'][next_depot]):
1733        external_depot = next_depot
1734        break
1735
1736    return external_depot
1737
1738  def PrepareToBisectOnDepot(self,
1739                             current_depot,
1740                             end_revision,
1741                             start_revision,
1742                             previous_depot,
1743                             previous_revision):
1744    """Changes to the appropriate directory and gathers a list of revisions
1745    to bisect between |start_revision| and |end_revision|.
1746
1747    Args:
1748      current_depot: The depot we want to bisect.
1749      end_revision: End of the revision range.
1750      start_revision: Start of the revision range.
1751      previous_depot: The depot we were previously bisecting.
1752      previous_revision: The last revision we synced to on |previous_depot|.
1753
1754    Returns:
1755      A list containing the revisions between |start_revision| and
1756      |end_revision| inclusive.
1757    """
1758    # Change into working directory of external library to run
1759    # subsequent commands.
1760    self.ChangeToDepotWorkingDirectory(current_depot)
1761
1762    # V8 (and possibly others) is merged in periodically. Bisecting
1763    # this directory directly won't give much good info.
    if 'custom_deps' in DEPOT_DEPS_NAME[current_depot]:
1765      config_path = os.path.join(self.src_cwd, '..')
1766      if bisect_utils.RunGClientAndCreateConfig(self.opts,
1767          DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
1768        return []
1769      if bisect_utils.RunGClient(
1770          ['sync', '--revision', previous_revision], cwd=self.src_cwd):
1771        return []
1772
1773    if current_depot == 'v8_bleeding_edge':
1774      self.ChangeToDepotWorkingDirectory('chromium')
1775
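      # Swap the checked-out v8 directory for v8_bleeding_edge so the build
      # uses bleeding_edge sources; the cleanup commands queued below restore
      # the original layout afterwards.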
1776      shutil.move('v8', 'v8.bak')
1777      shutil.move('v8_bleeding_edge', 'v8')
1778
1779      self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
1780      self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
1781
1782      self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
1783      self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
1784
1785      self.ChangeToDepotWorkingDirectory(current_depot)
1786
1787    depot_revision_list = self.GetRevisionList(current_depot,
1788                                               end_revision,
1789                                               start_revision)
1790
1791    self.ChangeToDepotWorkingDirectory('chromium')
1792
1793    return depot_revision_list
1794
1795  def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
1796    """Gathers reference values by running the performance tests on the
1797    known good and bad revisions.
1798
1799    Args:
1800      good_rev: The last known good revision where the performance regression
1801        has not occurred yet.
      bad_rev: A revision where the performance regression has already
        occurred.
      cmd: The command to execute the performance test.
      metric: The metric being tested for regression.
      target_depot: The depot being bisected (e.g. 'chromium' or 'cros').
1805
1806    Returns:
1807      A tuple with the results of building and running each revision.
1808    """
1809    bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
1810                                                   target_depot,
1811                                                   cmd,
1812                                                   metric)
1813
1814    good_run_results = None
1815
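    # Only run the good revision if the bad revision built and ran cleanly;
    # otherwise good_run_results stays None and the caller reports the error.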
1816    if not bad_run_results[1]:
1817      good_run_results = self.SyncBuildAndRunRevision(good_rev,
1818                                                      target_depot,
1819                                                      cmd,
1820                                                      metric)
1821
1822    return (bad_run_results, good_run_results)
1823
1824  def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
1825    """Adds new revisions to the revision_data dict and initializes them.
1826
1827    Args:
1828      revisions: List of revisions to add.
1829      depot: Depot that's currently in use (src, webkit, etc...)
1830      sort: Sorting key for displaying revisions.
1831      revision_data: A dict to add the new revisions into. Existing revisions
1832        will have their sort keys offset.
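
    Example (illustrative): inserting three WebKit revisions at sort
      position 5 bumps every existing entry whose sort value is greater
      than 5 by three and gives the new entries sort values 6, 7 and 8.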
1833    """
1834
1835    num_depot_revisions = len(revisions)
1836
    for v in revision_data.itervalues():
1838      if v['sort'] > sort:
1839        v['sort'] += num_depot_revisions
1840
1841    for i in xrange(num_depot_revisions):
1842      r = revisions[i]
1843
1844      revision_data[r] = {'revision' : r,
1845                          'depot' : depot,
1846                          'value' : None,
1847                          'perf_time' : 0,
1848                          'build_time' : 0,
1849                          'passed' : '?',
1850                          'sort' : i + sort + 1}
1851
1852  def PrintRevisionsToBisectMessage(self, revision_list, depot):
1853    if self.opts.output_buildbot_annotations:
      step_name = 'Bisection Range: [%s - %s]' % (
          revision_list[-1], revision_list[0])
1856      bisect_utils.OutputAnnotationStepStart(step_name)
1857
1858    print
1859    print 'Revisions to bisect on [%s]:' % depot
1860    for revision_id in revision_list:
1861      print '  -> %s' % (revision_id, )
1862    print
1863
1864    if self.opts.output_buildbot_annotations:
1865      bisect_utils.OutputAnnotationStepClosed()
1866
1867  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
1868    """Checks to see if changes to DEPS file occurred, and that the revision
1869    range also includes the change to .DEPS.git. If it doesn't, attempts to
1870    expand the revision range to include it.
1871
1872    Args:
        bad_revision: First known bad revision.
1874        good_revision: Last known good revision.
1875
1876    Returns:
1877        A tuple with the new bad and good revisions.
1878    """
1879    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
1880      changes_to_deps = self.source_control.QueryFileRevisionHistory(
1881          'DEPS', good_revision, bad_revision)
1882
1883      if changes_to_deps:
1884        # DEPS file was changed, search from the oldest change to DEPS file to
1885        # bad_revision to see if there are matching .DEPS.git changes.
1886        oldest_deps_change = changes_to_deps[-1]
1887        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
1888            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
1889
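        # A DEPS change normally lands with a matching .DEPS.git change; a
        # count mismatch suggests the range is missing the .DEPS.git commit.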
1890        if len(changes_to_deps) != len(changes_to_gitdeps):
1891          # Grab the timestamp of the last DEPS change
1892          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
1893          output = CheckRunGit(cmd)
1894          commit_time = int(output)
1895
1896          # Try looking for a commit that touches the .DEPS.git file in the
1897          # next 15 minutes after the DEPS file change.
1898          cmd = ['log', '--format=%H', '-1',
1899              '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
1900              'origin/master', bisect_utils.FILE_DEPS_GIT]
1901          output = CheckRunGit(cmd)
1902          output = output.strip()
1903          if output:
1904            self.warnings.append('Detected change to DEPS and modified '
1905                'revision range to include change to .DEPS.git')
1906            return (output, good_revision)
1907          else:
1908            self.warnings.append('Detected change to DEPS but couldn\'t find '
1909                'matching change to .DEPS.git')
1910    return (bad_revision, good_revision)
1911
1912  def CheckIfRevisionsInProperOrder(self,
1913                                    target_depot,
1914                                    good_revision,
1915                                    bad_revision):
1916    """Checks that |good_revision| is an earlier revision than |bad_revision|.
1917
    Args:
        target_depot: The depot the revisions belong to (see DEPOT_NAMES).
        good_revision: Number/tag of the known good revision.
        bad_revision: Number/tag of the known bad revision.
1921
1922    Returns:
1923        True if the revisions are in the proper order (good earlier than bad).
1924    """
1925    if self.source_control.IsGit() and target_depot != 'cros':
1926      cmd = ['log', '--format=%ct', '-1', good_revision]
1927      cwd = self._GetDepotDirectory(target_depot)
1928
1929      output = CheckRunGit(cmd, cwd=cwd)
1930      good_commit_time = int(output)
1931
1932      cmd = ['log', '--format=%ct', '-1', bad_revision]
1933      output = CheckRunGit(cmd, cwd=cwd)
1934      bad_commit_time = int(output)
1935
1936      return good_commit_time <= bad_commit_time
1937    else:
1938      # Cros/svn use integers
1939      return int(good_revision) <= int(bad_revision)
1940
1941  def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
1942    """Given known good and bad revisions, run a binary search on all
1943    intermediate revisions to determine the CL where the performance regression
1944    occurred.
1945
1946    Args:
        command_to_run: Specify the command to execute the performance test.
        bad_revision_in: Number/tag of the known bad revision.
        good_revision_in: Number/tag of the known good revision.
1950        metric: The performance metric to monitor.
1951
1952    Returns:
1953        A dict with 2 members, 'revision_data' and 'error'. On success,
1954        'revision_data' will contain a dict mapping revision ids to
1955        data about that revision. Each piece of revision data consists of a
1956        dict with the following keys:
1957
        'passed': Represents whether the performance test was successful at
            that revision. Possible values include: True (passed),
            False (failed), '?' (not yet run), 'Skipped', or 'Build Failed'.
        'depot': The depot that this revision is from (e.g. WebKit).
1962        'external': If the revision is a 'src' revision, 'external' contains
1963            the revisions of each of the external libraries.
1964        'sort': A sort value for sorting the dict in order of commits.
1965
1966        For example:
1967        {
1968          'error':None,
1969          'revision_data':
1970          {
1971            'CL #1':
1972            {
1973              'passed':False,
1974              'depot':'chromium',
1975              'external':None,
1976              'sort':0
1977            }
1978          }
1979        }
1980
1981        If an error occurred, the 'error' field will contain the message and
1982        'revision_data' will be empty.
1983    """
1984
1985    results = {'revision_data' : {},
1986               'error' : None}
1987
1988    # Choose depot to bisect first
1989    target_depot = 'chromium'
1990    if self.opts.target_platform == 'cros':
1991      target_depot = 'cros'
1992    elif self.opts.target_platform == 'android-chrome':
1993      target_depot = 'android-chrome'
1994
1995    cwd = os.getcwd()
1996    self.ChangeToDepotWorkingDirectory(target_depot)
1997
    # If they passed SVN CLs, etc., we can try to match them to git SHA1s.
1999    bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2000                                                         target_depot, 100)
2001    good_revision = self.source_control.ResolveToRevision(good_revision_in,
2002                                                          target_depot, -100)
2003
2004    os.chdir(cwd)
2005
2006
2007    if bad_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2009      return results
2010
2011    if good_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
2013      return results
2014
2015    # Check that they didn't accidentally swap good and bad revisions.
2016    if not self.CheckIfRevisionsInProperOrder(
2017        target_depot, good_revision, bad_revision):
2018      results['error'] = 'bad_revision < good_revision, did you swap these '\
2019          'by mistake?'
2020      return results
2021
2022    (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2023        bad_revision, good_revision)
2024
2025    if self.opts.output_buildbot_annotations:
2026      bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2027
2028    print 'Gathering revision range for bisection.'
2029
2030    # Retrieve a list of revisions to do bisection on.
2031    src_revision_list = self.GetRevisionList(target_depot,
2032                                             bad_revision,
2033                                             good_revision)
2034
2035    if self.opts.output_buildbot_annotations:
2036      bisect_utils.OutputAnnotationStepClosed()
2037
2038    if src_revision_list:
2039      # revision_data will store information about a revision such as the
2040      # depot it came from, the webkit/V8 revision at that time,
2041      # performance timing, build state, etc...
2042      revision_data = results['revision_data']
2043
2044      # revision_list is the list we're binary searching through at the moment.
2045      revision_list = []
2046
2047      sort_key_ids = 0
2048
2049      for current_revision_id in src_revision_list:
2050        sort_key_ids += 1
2051
2052        revision_data[current_revision_id] = {'value' : None,
2053                                              'passed' : '?',
2054                                              'depot' : target_depot,
2055                                              'external' : None,
2056                                              'perf_time' : 0,
2057                                              'build_time' : 0,
2058                                              'sort' : sort_key_ids}
2059        revision_list.append(current_revision_id)
2060
2061      min_revision = 0
2062      max_revision = len(revision_list) - 1
2063
2064      self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2065
2066      if self.opts.output_buildbot_annotations:
2067        bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2068
2069      print 'Gathering reference values for bisection.'
2070
2071      # Perform the performance tests on the good and bad revisions, to get
2072      # reference values.
2073      (bad_results, good_results) = self.GatherReferenceValues(good_revision,
2074                                                               bad_revision,
2075                                                               command_to_run,
2076                                                               metric,
2077                                                               target_depot)
2078
2079      if self.opts.output_buildbot_annotations:
2080        bisect_utils.OutputAnnotationStepClosed()
2081
2082      if bad_results[1]:
2083        results['error'] = ('An error occurred while building and running '
2084            'the \'bad\' reference value. The bisect cannot continue without '
2085            'a working \'bad\' revision to start from.\n\nError: %s' %
2086                bad_results[0])
2087        return results
2088
2089      if good_results[1]:
2090        results['error'] = ('An error occurred while building and running '
2091            'the \'good\' reference value. The bisect cannot continue without '
2092            'a working \'good\' revision to start from.\n\nError: %s' %
2093                good_results[0])
2094        return results
2095
2096
2097      # We need these reference values to determine if later runs should be
2098      # classified as pass or fail.
2099      known_bad_value = bad_results[0]
2100      known_good_value = good_results[0]
2101
2102      # Can just mark the good and bad revisions explicitly here since we
2103      # already know the results.
2104      bad_revision_data = revision_data[revision_list[0]]
2105      bad_revision_data['external'] = bad_results[2]
2106      bad_revision_data['perf_time'] = bad_results[3]
2107      bad_revision_data['build_time'] = bad_results[4]
2108      bad_revision_data['passed'] = False
2109      bad_revision_data['value'] = known_bad_value
2110
2111      good_revision_data = revision_data[revision_list[max_revision]]
2112      good_revision_data['external'] = good_results[2]
2113      good_revision_data['perf_time'] = good_results[3]
2114      good_revision_data['build_time'] = good_results[4]
2115      good_revision_data['passed'] = True
2116      good_revision_data['value'] = known_good_value
2117
2118      next_revision_depot = target_depot
2119
2120      while True:
2121        if not revision_list:
2122          break
2123
2124        min_revision_data = revision_data[revision_list[min_revision]]
2125        max_revision_data = revision_data[revision_list[max_revision]]
2126
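        # Once the range has narrowed to two adjacent revisions, either one of
        # them still needs to be tested, or we check whether an external depot
        # (e.g. WebKit or V8) rolled between them and, if so, descend into it.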
2127        if max_revision - min_revision <= 1:
2128          current_depot = min_revision_data['depot']
2129          if min_revision_data['passed'] == '?':
2130            next_revision_index = min_revision
2131          elif max_revision_data['passed'] == '?':
2132            next_revision_index = max_revision
2133          elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2134            previous_revision = revision_list[min_revision]
            # If there were changes to any of the external libraries we track,
            # we should bisect the changes there as well.
2137            external_depot = self._FindNextDepotToBisect(current_depot,
2138                previous_revision, min_revision_data, max_revision_data)
2139
2140            # If there was no change in any of the external depots, the search
2141            # is over.
2142            if not external_depot:
2143              if current_depot == 'v8':
2144                self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2145                    'continue any further. The script can only bisect into '
2146                    'V8\'s bleeding_edge repository if both the current and '
2147                    'previous revisions in trunk map directly to revisions in '
2148                    'bleeding_edge.')
2149              break
2150
2151            earliest_revision = max_revision_data['external'][external_depot]
2152            latest_revision = min_revision_data['external'][external_depot]
2153
2154            new_revision_list = self.PrepareToBisectOnDepot(external_depot,
2155                                                            latest_revision,
2156                                                            earliest_revision,
2157                                                            next_revision_depot,
2158                                                            previous_revision)
2159
2160            if not new_revision_list:
2161              results['error'] = 'An error occurred attempting to retrieve'\
2162                                 ' revision range: [%s..%s]' %\
2163                                 (earliest_revision, latest_revision)
2164              return results
2165
2166            self.AddRevisionsIntoRevisionData(new_revision_list,
2167                                              external_depot,
2168                                              min_revision_data['sort'],
2169                                              revision_data)
2170
2171            # Reset the bisection and perform it on the newly inserted
2172            # changelists.
2173            revision_list = new_revision_list
2174            min_revision = 0
2175            max_revision = len(revision_list) - 1
2176            sort_key_ids += len(revision_list)
2177
            print 'Regression in metric: %s appears to be the result of'\
                  ' changes in [%s].' % (metric, external_depot)
2180
2181            self.PrintRevisionsToBisectMessage(revision_list, external_depot)
2182
2183            continue
2184          else:
2185            break
2186        else:
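          # Otherwise probe the midpoint of the remaining range.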
2187          next_revision_index = int((max_revision - min_revision) / 2) +\
2188                                min_revision
2189
2190        next_revision_id = revision_list[next_revision_index]
2191        next_revision_data = revision_data[next_revision_id]
2192        next_revision_depot = next_revision_data['depot']
2193
2194        self.ChangeToDepotWorkingDirectory(next_revision_depot)
2195
2196        if self.opts.output_buildbot_annotations:
2197          step_name = 'Working on [%s]' % next_revision_id
2198          bisect_utils.OutputAnnotationStepStart(step_name)
2199
2200        print 'Working on revision: [%s]' % next_revision_id
2201
2202        run_results = self.SyncBuildAndRunRevision(next_revision_id,
2203                                                   next_revision_depot,
2204                                                   command_to_run,
2205                                                   metric, skippable=True)
2206
2207        # If the build is successful, check whether or not the metric
2208        # had regressed.
2209        if not run_results[1]:
2210          if len(run_results) > 2:
2211            next_revision_data['external'] = run_results[2]
2212            next_revision_data['perf_time'] = run_results[3]
2213            next_revision_data['build_time'] = run_results[4]
2214
2215          passed_regression = self.CheckIfRunPassed(run_results[0],
2216                                                    known_good_value,
2217                                                    known_bad_value)
2218
2219          next_revision_data['passed'] = passed_regression
2220          next_revision_data['value'] = run_results[0]
2221
2222          if passed_regression:
2223            max_revision = next_revision_index
2224          else:
2225            min_revision = next_revision_index
2226        else:
2227          if run_results[1] == BUILD_RESULT_SKIPPED:
2228            next_revision_data['passed'] = 'Skipped'
2229          elif run_results[1] == BUILD_RESULT_FAIL:
2230            next_revision_data['passed'] = 'Build Failed'
2231
2232          print run_results[0]
2233
2234          # If the build is broken, remove it and redo search.
2235          revision_list.pop(next_revision_index)
2236
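          # Popping shifts every later entry down by one, so move the upper
          # bound down to keep it pointing at the same revision.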
2237          max_revision -= 1
2238
2239        if self.opts.output_buildbot_annotations:
2240          bisect_utils.OutputAnnotationStepClosed()
2241    else:
2242      # Weren't able to sync and retrieve the revision range.
2243      results['error'] = 'An error occurred attempting to retrieve revision '\
2244                         'range: [%s..%s]' % (good_revision, bad_revision)
2245
2246    return results
2247
2248  def _PrintBanner(self, results_dict):
2249    print
2250    print " __o_\___          Aw Snap! We hit a speed bump!"
2251    print "=-O----O-'__.~.___________________________________"
2252    print
2253    print 'Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % (
2254        results_dict['regression_size'], results_dict['regression_std_err'],
2255        '/'.join(self.opts.metric))
2256    # The perf dashboard specifically looks for the string
2257    # "Confidence in Bisection Results: 100%" to decide whether or not
2258    # to cc the author(s). If you change this, please update the perf
2259    # dashboard as well.
2260    print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
2261
2262  def _PrintRevisionInfo(self, cl, info, depot=None):
2263    # The perf dashboard specifically looks for the string
2264    # "Author  : " to parse out who to cc on a bug. If you change the
2265    # formatting here, please update the perf dashboard as well.
2266    print
2267    print 'Subject : %s' % info['subject']
2268    print 'Author  : %s' % info['author']
2269    if not info['email'].startswith(info['author']):
2270      print 'Email   : %s' % info['email']
    if depot and 'viewvc' in DEPOT_DEPS_NAME[depot]:
2272      try:
2273        # Format is "git-svn-id: svn://....@123456 <other data>"
2274        svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
2275        svn_revision = svn_line[0].split('@')
2276        svn_revision = svn_revision[1].split(' ')[0]
2277        print 'Link    : %s' % DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
2278      except IndexError:
2279        print
2280        print 'Failed to parse svn revision from body:'
2281        print
2282        print info['body']
2283        print
2284    print 'Commit  : %s' % cl
2285    print 'Date    : %s' % info['date']
2286
2287  def _PrintTestedCommitsTable(self, revision_data_sorted,
2288                               first_working_revision, last_broken_revision):
2289    print
2290    print 'Tested commits:'
2291    print '  %20s  %40s  %12s %14s %13s' % ('Depot'.center(20, ' '),
2292        'Commit SHA'.center(40, ' '), 'Mean'.center(12, ' '),
2293        'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
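    # `state` tracks which side of the suspected regression each row falls on:
    # it starts at 0 (labelled Bad) and is bumped when the loop reaches the
    # last broken and first working revisions, switching the label to
    # Suspected CL and then Good.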
2294    state = 0
2295    for current_id, current_data in revision_data_sorted:
2296      if current_data['value']:
2297        if (current_id == last_broken_revision or
2298            current_id == first_working_revision):
2299          print
2300          state += 1
2301
2302        state_str = 'Bad'
2303        if state == 1:
2304          state_str = 'Suspected CL'
2305        elif state == 2:
2306          state_str = 'Good'
2307        state_str = state_str.center(13, ' ')
2308
2309        std_error = ('+-%.02f' %
2310            current_data['value']['std_err']).center(14, ' ')
2311        mean = ('%.02f' % current_data['value']['mean']).center(12, ' ')
2312        print '  %20s  %40s  %12s %14s %13s' % (
2313            current_data['depot'].center(20, ' '), current_id, mean,
2314            std_error, state_str)
2315
2316  def _PrintReproSteps(self):
2317    print
2318    print 'To reproduce locally:'
2319    print '$ ' + self.opts.command
2320    if bisect_utils.IsTelemetryCommand(self.opts.command):
2321      print
2322      print 'Also consider passing --profiler=list to see available profilers.'
2323
2324  def _PrintOtherRegressions(self, other_regressions, revision_data):
2325    print
2326    print 'Other regressions may have occurred:'
2327    for regression in other_regressions:
2328      current_id, previous_id, percent_change, deviations = regression
2329      current_data = revision_data[current_id]
2330      previous_data = revision_data[previous_id]
2331
2332      if deviations is None:
2333        deviations = 'N/A'
2334      else:
2335        deviations = '%.2f' % deviations
2336
2337      if percent_change is None:
2338        percent_change = 0
2339
2340      print '  %8s  %s  [%.2f%%, %s x std.dev]' % (
2341          previous_data['depot'], previous_id, 100 * percent_change, deviations)
2342      print '  %8s  %s' % (current_data['depot'], current_id)
2343      print
2344
2345  def _PrintStepTime(self, revision_data_sorted):
2346    step_perf_time_avg = 0.0
2347    step_build_time_avg = 0.0
2348    step_count = 0.0
2349    for _, current_data in revision_data_sorted:
2350      step_perf_time_avg += current_data['perf_time']
2351      step_build_time_avg += current_data['build_time']
2352      step_count += 1
2353    if step_count:
2354      step_perf_time_avg = step_perf_time_avg / step_count
2355      step_build_time_avg = step_build_time_avg / step_count
2356    print
2357    print 'Average build time : %s' % datetime.timedelta(
2358        seconds=int(step_build_time_avg))
2359    print 'Average test time  : %s' % datetime.timedelta(
2360        seconds=int(step_perf_time_avg))
2361
2362  def _PrintWarnings(self):
2363    if not self.warnings:
2364      return
2365    print
2366    print 'WARNINGS:'
2367    for w in self.warnings:
2368      print '  !!! %s' % w
2369
2370  def _GetResultsDict(self, revision_data, revision_data_sorted):
2371    # Find range where it possibly broke.
2372    first_working_revision = None
2373    first_working_revision_index = -1
2374    last_broken_revision = None
2375    last_broken_revision_index = -1
2376
2377    for i in xrange(len(revision_data_sorted)):
2378      k, v = revision_data_sorted[i]
2379      if v['passed'] == 1:
2380        if not first_working_revision:
2381          first_working_revision = k
2382          first_working_revision_index = i
2383
2384      if not v['passed']:
2385        last_broken_revision = k
2386        last_broken_revision_index = i
2387
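    # Run() explicitly marks the known bad revision as failed and the known
    # good revision as passed, so on a successful bisect both boundaries
    # should be found above.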
    if (last_broken_revision is not None and
        first_working_revision is not None):
2389      bounds_broken = [revision_data[last_broken_revision]['value']['mean'],
2390          revision_data[last_broken_revision]['value']['mean']]
2391      broken_mean = []
2392      for i in xrange(0, last_broken_revision_index + 1):
2393        if revision_data_sorted[i][1]['value']:
2394          bounds_broken[0] = min(bounds_broken[0],
2395              revision_data_sorted[i][1]['value']['mean'])
2396          bounds_broken[1] = max(bounds_broken[1],
2397              revision_data_sorted[i][1]['value']['mean'])
2398          broken_mean.extend(revision_data_sorted[i][1]['value']['values'])
2399
2400      bounds_working = [revision_data[first_working_revision]['value']['mean'],
2401          revision_data[first_working_revision]['value']['mean']]
2402      working_mean = []
2403      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
2404        if revision_data_sorted[i][1]['value']:
2405          bounds_working[0] = min(bounds_working[0],
2406              revision_data_sorted[i][1]['value']['mean'])
2407          bounds_working[1] = max(bounds_working[1],
2408              revision_data_sorted[i][1]['value']['mean'])
2409          working_mean.extend(revision_data_sorted[i][1]['value']['values'])
2410
2411      # Calculate the approximate size of the regression
2412      mean_of_bad_runs = CalculateTruncatedMean(broken_mean, 0.0)
2413      mean_of_good_runs = CalculateTruncatedMean(working_mean, 0.0)
2414
2415      regression_size = math.fabs(max(mean_of_good_runs, mean_of_bad_runs) /
2416          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0 - 100.0
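      # e.g. means of 120 vs. 100 give 120 / 100 * 100.0 - 100.0 = 20% change.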
2417
2418      regression_std_err = math.fabs(CalculatePooledStandardError(
2419          [working_mean, broken_mean]) /
2420          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
2421
2422      # Give a "confidence" in the bisect. At the moment we use how distinct the
2423      # values are before and after the last broken revision, and how noisy the
2424      # overall graph is.
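      # Illustrative example: if the broken means span [10, 11] and the
      # working means span [15, 16], the closest group edges are 4 apart; with
      # group standard deviations of 1 and 1 the ratio is 4 / 2 = 2.0, which
      # is clamped to 1.0 and reported as 100% confidence.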
2425      dist_between_groups = min(math.fabs(bounds_broken[1] - bounds_working[0]),
2426          math.fabs(bounds_broken[0] - bounds_working[1]))
      std_dev_working_group = CalculateStandardDeviation(working_mean)
      std_dev_broken_group = CalculateStandardDeviation(broken_mean)

      confidence = (dist_between_groups / (
          max(0.0001, (std_dev_broken_group + std_dev_working_group))))
2432      confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
2433
2434      culprit_revisions = []
2435
2436      cwd = os.getcwd()
2437      self.ChangeToDepotWorkingDirectory(
2438          revision_data[last_broken_revision]['depot'])
2439
2440      if revision_data[last_broken_revision]['depot'] == 'cros':
2441        # Want to get a list of all the commits and what depots they belong
2442        # to so that we can grab info about each.
2443        cmd = ['repo', 'forall', '-c',
2444            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
2445            last_broken_revision, first_working_revision + 1)]
2446        (output, return_code) = RunProcessAndRetrieveOutput(cmd)
2447
2448        changes = []
2449        assert not return_code, 'An error occurred while running'\
2450                                ' "%s"' % ' '.join(cmd)
2451        last_depot = None
2452        cwd = os.getcwd()
2453        for l in output.split('\n'):
2454          if l:
2455            # Output will be in form:
2456            # /path_to_depot
2457            # /path_to_other_depot
2458            # <SHA1>
2459            # /path_again
2460            # <SHA1>
2461            # etc.
2462            if l[0] == '/':
2463              last_depot = l
2464            else:
2465              contents = l.split(' ')
2466              if len(contents) > 1:
2467                changes.append([last_depot, contents[0]])
2468        for c in changes:
2469          os.chdir(c[0])
2470          info = self.source_control.QueryRevisionInfo(c[1])
2471          culprit_revisions.append((c[1], info, None))
2472      else:
2473        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
2474          k, v = revision_data_sorted[i]
2475          if k == first_working_revision:
2476            break
2477          self.ChangeToDepotWorkingDirectory(v['depot'])
2478          info = self.source_control.QueryRevisionInfo(k)
2479          culprit_revisions.append((k, info, v['depot']))
2480      os.chdir(cwd)
2481
2482      # Check for any other possible regression ranges
2483      good_std_dev = revision_data[first_working_revision]['value']['std_err']
2484      good_mean = revision_data[first_working_revision]['value']['mean']
2485      bad_mean = revision_data[last_broken_revision]['value']['mean']
2486      prev_revision_data = revision_data_sorted[0][1]
2487      prev_revision_id = revision_data_sorted[0][0]
2488      other_regressions = []
2489      for current_id, current_data in revision_data_sorted:
2490        if current_data['value']:
2491          prev_mean = prev_revision_data['value']['mean']
2492          cur_mean = current_data['value']['mean']
2493
2494          if good_std_dev:
2495            deviations = math.fabs(prev_mean - cur_mean) / good_std_dev
2496          else:
2497            deviations = None
2498
2499          if good_mean:
2500            percent_change = (prev_mean - cur_mean) / good_mean
2501
            # If the "good" values are supposed to be higher than the "bad"
            # values (e.g. scores), flip the sign of the percent change so that
2504            # a positive value always represents a regression.
2505            if bad_mean < good_mean:
2506              percent_change *= -1.0
2507          else:
2508            percent_change = None
2509
2510          if deviations >= 1.5 or percent_change > 0.01:
2511            if current_id != first_working_revision:
2512              other_regressions.append(
2513                  [current_id, prev_revision_id, percent_change, deviations])
2514          prev_revision_data = current_data
2515          prev_revision_id = current_id
2516
2517    # Check for warnings:
2518    if len(culprit_revisions) > 1:
2519      self.warnings.append('Due to build errors, regression range could '
2520                           'not be narrowed down to a single commit.')
2521    if self.opts.repeat_test_count == 1:
2522      self.warnings.append('Tests were only set to run once. This may '
2523                           'be insufficient to get meaningful results.')
2524    if confidence < 100:
2525      self.warnings.append(
2526          'Confidence is less than 100%. There could be other candidates for '
          'this regression. Try bisecting again with an increased '
          'repeat_test_count, or on a sub-metric that shows the regression '
          'more clearly.')
2529
2530    return {
2531        'first_working_revision': first_working_revision,
2532        'last_broken_revision': last_broken_revision,
2533        'culprit_revisions': culprit_revisions,
2534        'other_regressions': other_regressions,
2535        'regression_size': regression_size,
2536        'regression_std_err': regression_std_err,
2537        'confidence': confidence,
2538        }
2539
2540  def FormatAndPrintResults(self, bisect_results):
2541    """Prints the results from a bisection run in a readable format.
2542
    Args:
2544      bisect_results: The results from a bisection test run.
2545    """
2546    revision_data = bisect_results['revision_data']
2547    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key=lambda x: x[1]['sort'])
2549    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2550
2551    if self.opts.output_buildbot_annotations:
2552      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
2553
2554    print
2555    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
2557      build_status = current_data['passed']
2558
      if isinstance(build_status, bool):
2560        if build_status:
2561          build_status = 'Good'
2562        else:
2563          build_status = 'Bad'
2564
2565      print '  %20s  %40s  %s' % (current_data['depot'],
2566                                  current_id, build_status)
2567    print
2568
2569    if self.opts.output_buildbot_annotations:
2570      bisect_utils.OutputAnnotationStepClosed()
2571      # The perf dashboard scrapes the "results" step in order to comment on
2572      # bugs. If you change this, please update the perf dashboard as well.
2573      bisect_utils.OutputAnnotationStepStart('Results')
2574
2575    if results_dict['culprit_revisions']:
2576      self._PrintBanner(results_dict)
2577      for culprit in results_dict['culprit_revisions']:
2578        cl, info, depot = culprit
2579        self._PrintRevisionInfo(cl, info, depot)
2580      self._PrintReproSteps()
2581      if results_dict['other_regressions']:
2582        self._PrintOtherRegressions(results_dict['other_regressions'],
2583                                    revision_data)
2584
2585    self._PrintTestedCommitsTable(revision_data_sorted,
2586                                  results_dict['first_working_revision'],
2587                                  results_dict['last_broken_revision'])
2588    self._PrintStepTime(revision_data_sorted)
2589    self._PrintWarnings()
2590
2591    if self.opts.output_buildbot_annotations:
2592      bisect_utils.OutputAnnotationStepClosed()
2593
2594
2595def DetermineAndCreateSourceControl(opts):
2596  """Attempts to determine the underlying source control workflow and returns
2597  a SourceControl object.
2598
2599  Returns:
2600    An instance of a SourceControl object, or None if the current workflow
2601    is unsupported.
2602  """
2603
2604  (output, return_code) = RunGit(['rev-parse', '--is-inside-work-tree'])
2605
2606  if output.strip() == 'true':
2607    return GitSourceControl(opts)
2608
2609  return None
2610
2611
2612def IsPlatformSupported(opts):
2613  """Checks that this platform and build system are supported.
2614
2615  Args:
2616    opts: The options parsed from the command line.
2617
2618  Returns:
2619    True if the platform and build system are supported.
2620  """
2621  # Haven't tested the script out on any other platforms yet.
2622  supported = ['posix', 'nt']
2623  return os.name in supported
2624
2625
2626def RmTreeAndMkDir(path_to_dir):
2627  """Removes the directory tree specified, and then creates an empty
2628  directory in the same location.
2629
2630  Args:
2631    path_to_dir: Path to the directory tree.
2632
2633  Returns:
2634    True if successful, False if an error occurred.
2635  """
2636  try:
2637    if os.path.exists(path_to_dir):
2638      shutil.rmtree(path_to_dir)
2639  except OSError, e:
2640    if e.errno != errno.ENOENT:
2641      return False
2642
2643  try:
2644    os.makedirs(path_to_dir)
2645  except OSError, e:
2646    if e.errno != errno.EEXIST:
2647      return False
2648
2649  return True
2650
2651
2652def RemoveBuildFiles():
2653  """Removes build files from previous runs."""
2654  if RmTreeAndMkDir(os.path.join('out', 'Release')):
2655    if RmTreeAndMkDir(os.path.join('build', 'Release')):
2656      return True
2657  return False
2658
2659
2660class BisectOptions(object):
2661  """Options to be used when running bisection."""
2662  def __init__(self):
2663    super(BisectOptions, self).__init__()
2664
2665    self.target_platform = 'chromium'
2666    self.build_preference = None
2667    self.good_revision = None
2668    self.bad_revision = None
2669    self.use_goma = None
2670    self.cros_board = None
2671    self.cros_remote_ip = None
2672    self.repeat_test_count = 20
2673    self.truncate_percent = 25
2674    self.max_time_minutes = 20
2675    self.metric = None
2676    self.command = None
2677    self.output_buildbot_annotations = None
2678    self.no_custom_deps = False
2679    self.working_directory = None
2680    self.extra_src = None
2681    self.debug_ignore_build = None
2682    self.debug_ignore_sync = None
2683    self.debug_ignore_perf_test = None
2684
2685  def _CreateCommandLineParser(self):
2686    """Creates a parser with bisect options.
2687
2688    Returns:
2689      An instance of optparse.OptionParser.
2690    """
2691    usage = ('%prog [options] [-- chromium-options]\n'
2692             'Perform binary search on revision history to find a minimal '
             'range of revisions where a performance metric regressed.\n')
2694
2695    parser = optparse.OptionParser(usage=usage)
2696
2697    group = optparse.OptionGroup(parser, 'Bisect options')
2698    group.add_option('-c', '--command',
2699                     type='str',
2700                     help='A command to execute your performance test at' +
2701                     ' each point in the bisection.')
2702    group.add_option('-b', '--bad_revision',
2703                     type='str',
2704                     help='A bad revision to start bisection. ' +
2705                     'Must be later than good revision. May be either a git' +
2706                     ' or svn revision.')
2707    group.add_option('-g', '--good_revision',
2708                     type='str',
2709                     help='A revision to start bisection where performance' +
2710                     ' test is known to pass. Must be earlier than the ' +
2711                     'bad revision. May be either a git or svn revision.')
2712    group.add_option('-m', '--metric',
2713                     type='str',
2714                     help='The desired metric to bisect on. For example ' +
2715                     '"vm_rss_final_b/vm_rss_f_b"')
2716    group.add_option('-r', '--repeat_test_count',
2717                     type='int',
2718                     default=20,
2719                     help='The number of times to repeat the performance '
2720                     'test. Values will be clamped to range [1, 100]. '
2721                     'Default value is 20.')
2722    group.add_option('--max_time_minutes',
2723                     type='int',
2724                     default=20,
2725                     help='The maximum time (in minutes) to take running the '
2726                     'performance tests. The script will run the performance '
2727                     'tests according to --repeat_test_count, so long as it '
2728                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60]. '
2730                     'Default value is 20.')
2731    group.add_option('-t', '--truncate_percent',
2732                     type='int',
2733                     default=25,
2734                     help='The highest/lowest % are discarded to form a '
2735                     'truncated mean. Values will be clamped to range [0, '
2736                     '25]. Default value is 25 (highest/lowest 25% will be '
2737                     'discarded).')
2738    parser.add_option_group(group)
2739
2740    group = optparse.OptionGroup(parser, 'Build options')
2741    group.add_option('-w', '--working_directory',
2742                     type='str',
2743                     help='Path to the working directory where the script '
2744                     'will do an initial checkout of the chromium depot. The '
2745                     'files will be placed in a subdirectory "bisect" under '
2746                     'working_directory and that will be used to perform the '
2747                     'bisection. This parameter is optional, if it is not '
2748                     'supplied, the script will work from the current depot.')
2749    group.add_option('--build_preference',
2750                     type='choice',
2751                     choices=['msvs', 'ninja', 'make'],
2752                     help='The preferred build system to use. On linux/mac '
2753                     'the options are make/ninja. On Windows, the options '
2754                     'are msvs/ninja.')
2755    group.add_option('--target_platform',
2756                     type='choice',
2757                     choices=['chromium', 'cros', 'android', 'android-chrome'],
2758                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", "android", or '
                     '"android-chrome". If you specify something other than '
                     '"chromium", you must be properly set up to build that '
                     'platform.')
2763    group.add_option('--no_custom_deps',
2764                     dest='no_custom_deps',
2765                     action="store_true",
2766                     default=False,
2767                     help='Run the script with custom_deps or not.')
2768    group.add_option('--extra_src',
2769                     type='str',
2770                     help='Path to a script which can be used to modify '
2771                     'the bisect script\'s behavior.')
2772    group.add_option('--cros_board',
2773                     type='str',
2774                     help='The cros board type to build.')
2775    group.add_option('--cros_remote_ip',
2776                     type='str',
2777                     help='The remote machine to image to.')
2778    group.add_option('--use_goma',
2779                     action="store_true",
2780                     help='Add a bunch of extra threads for goma.')
2781    group.add_option('--output_buildbot_annotations',
2782                     action="store_true",
2783                     help='Add extra annotation output for buildbot.')
2784    parser.add_option_group(group)
2785
2786    group = optparse.OptionGroup(parser, 'Debug options')
2787    group.add_option('--debug_ignore_build',
2788                     action="store_true",
2789                     help='DEBUG: Don\'t perform builds.')
2790    group.add_option('--debug_ignore_sync',
2791                     action="store_true",
2792                     help='DEBUG: Don\'t perform syncs.')
2793    group.add_option('--debug_ignore_perf_test',
2794                     action="store_true",
2795                     help='DEBUG: Don\'t perform performance tests.')
2796    parser.add_option_group(group)
2797
2798
2799    return parser
2800
2801  def ParseCommandLine(self):
2802    """Parses the command line for bisect options."""
2803    parser = self._CreateCommandLineParser()
2804    (opts, args) = parser.parse_args()
2805
2806    try:
2807      if not opts.command:
2808        raise RuntimeError('missing required parameter: --command')
2809
2810      if not opts.good_revision:
2811        raise RuntimeError('missing required parameter: --good_revision')
2812
2813      if not opts.bad_revision:
2814        raise RuntimeError('missing required parameter: --bad_revision')
2815
2816      if not opts.metric:
2817        raise RuntimeError('missing required parameter: --metric')
2818
2819      if opts.target_platform == 'cros':
2820        # Run sudo up front to make sure credentials are cached for later.
2821        print 'Sudo is required to build cros:'
2822        print
2823        RunProcess(['sudo', 'true'])
2824
2825        if not opts.cros_board:
2826          raise RuntimeError('missing required parameter: --cros_board')
2827
2828        if not opts.cros_remote_ip:
2829          raise RuntimeError('missing required parameter: --cros_remote_ip')
2830
2831        if not opts.working_directory:
2832          raise RuntimeError('missing required parameter: --working_directory')
2833
2834      metric_values = opts.metric.split('/')
2835      if len(metric_values) != 2:
2836        raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
2837
2838      opts.metric = metric_values
2839      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
2840      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
2841      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
2842      opts.truncate_percent = opts.truncate_percent / 100.0
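      # e.g. --truncate_percent=25 becomes 0.25, so the highest and lowest 25%
      # of samples are discarded when computing the truncated mean.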
2843
2844      for k, v in opts.__dict__.iteritems():
2845        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
2846        setattr(self, k, v)
2847    except RuntimeError, e:
2848      output_string = StringIO.StringIO()
2849      parser.print_help(file=output_string)
2850      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
2851      output_string.close()
2852      raise RuntimeError(error_message)
2853
2854  @staticmethod
2855  def FromDict(values):
2856    """Creates an instance of BisectOptions with the values parsed from a
2857    .cfg file.
2858
2859    Args:
2860      values: a dict containing options to set.
2861
2862    Returns:
2863      An instance of BisectOptions.
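
    Example (illustrative values; the test command below is hypothetical):
      opts = BisectOptions.FromDict({
          'command': 'out/Release/performance_ui_tests '
                     '--gtest_filter=SomeSuite.SomeTest',
          'good_revision': '123456',
          'bad_revision': '123499',
          'metric': 'some_chart/some_trace',
      })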
2864    """
2865    opts = BisectOptions()
2866
2867    for k, v in values.iteritems():
2868      assert hasattr(opts, k), 'Invalid %s attribute in '\
2869          'BisectOptions.' % k
2870      setattr(opts, k, v)
2871
2872    metric_values = opts.metric.split('/')
2873    if len(metric_values) != 2:
2874      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
2875
2876    opts.metric = metric_values
2877    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
2878    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
2879    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
2880    opts.truncate_percent = opts.truncate_percent / 100.0
2881
2882    return opts
2883
2884
2885def main():
2886
2887  try:
2888    opts = BisectOptions()
    opts.ParseCommandLine()
2890
2891    if opts.extra_src:
2892      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
2893      if not extra_src:
2894        raise RuntimeError("Invalid or missing --extra_src.")
2895      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
2896
2897    if opts.working_directory:
2898      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
2899      if opts.no_custom_deps:
2900        custom_deps = None
2901      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
2902
2903      os.chdir(os.path.join(os.getcwd(), 'src'))
2904
2905      if not RemoveBuildFiles():
2906        raise RuntimeError('Something went wrong removing the build files.')
2907
2908    if not IsPlatformSupported(opts):
2909      raise RuntimeError("Sorry, this platform isn't supported yet.")
2910
2911    # Check what source control method they're using. Only support git workflow
2912    # at the moment.
2913    source_control = DetermineAndCreateSourceControl(opts)
2914
2915    if not source_control:
2916      raise RuntimeError("Sorry, only the git workflow is supported at the "
2917          "moment.")
2918
2919    # gClient sync seems to fail if you're not in master branch.
2920    if (not source_control.IsInProperBranch() and
2921        not opts.debug_ignore_sync and
2922        not opts.working_directory):
2923      raise RuntimeError("You must switch to master branch to run bisection.")
2924
2925    bisect_test = BisectPerformanceMetrics(source_control, opts)
2926    try:
2927      bisect_results = bisect_test.Run(opts.command,
2928                                       opts.bad_revision,
2929                                       opts.good_revision,
2930                                       opts.metric)
2931      if bisect_results['error']:
2932        raise RuntimeError(bisect_results['error'])
2933      bisect_test.FormatAndPrintResults(bisect_results)
2934      return 0
2935    finally:
2936      bisect_test.PerformCleanup()
2937  except RuntimeError, e:
2938    if opts.output_buildbot_annotations:
2939      # The perf dashboard scrapes the "results" step in order to comment on
2940      # bugs. If you change this, please update the perf dashboard as well.
2941      bisect_utils.OutputAnnotationStepStart('Results')
2942    print 'Error: %s' % e.message
2943    if opts.output_buildbot_annotations:
2944      bisect_utils.OutputAnnotationStepClosed()
2945  return 1
2946
2947if __name__ == '__main__':
2948  sys.exit(main())
2949