• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# Copyright (c) 2013 The Chromium Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Performance Test Bisect Tool
7
8This script bisects a series of changelists using binary search. It starts at
9a bad revision where a performance metric has regressed, and asks for a last
10known-good revision. It will then binary search across this revision range by
11syncing, building, and running a performance test. If the change is
12suspected to occur as a result of WebKit/V8 changes, the script will
13further bisect changes to those depots and attempt to narrow down the revision
14range.
15
16
17An example usage (using svn cl's):
18
19./tools/bisect-perf-regression.py -c\
20"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21-g 168222 -b 168232 -m shutdown/simple-user-quit
22
23Be aware that if you're using the git workflow and specify an svn revision,
24the script will attempt to find the git SHA1 where svn changes up to that
25revision were merged in.
26
27
28An example usage (using git hashes):
29
30./tools/bisect-perf-regression.py -c\
31"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32-g 1f6e67861535121c5c819c16a666f2436c207e7b\
33-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34-m shutdown/simple-user-quit
35
36"""
37
38import copy
39import datetime
40import errno
41import hashlib
42import math
43import optparse
44import os
45import re
46import shlex
47import shutil
48import StringIO
49import subprocess
50import sys
51import time
52import zipfile
53
54sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
55
56import bisect_utils
57import post_perf_builder_job as bisect_builder
58from telemetry.page import cloud_storage
59
# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src needing
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# list so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for the git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
# deps_var: Key name in the 'vars' variable in the DEPS file that holds this
#   depot's revision information.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "platform": 'nt',
    'deps_var': 'angle_revision'
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
}
144
# All depot names that may be offered for bisecting.
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

# Paths used when bisecting on a 'cros' target; all are relative to the
# chromium 'src' checkout (note the leading '..').
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Possible outcomes of a single build attempt.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# Patch template to add a new file, DEPS.sha, under the src folder.
# This file contains the SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch is sent along with the DEPS patch to the
# tryserver. When a build request is posted with a patch, bisect builders on
# the tryserver read the SHA value from this file once the build is produced,
# and append it to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
186
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  merged = DEPOT_DEPS_NAME.copy()
  merged.update(depot_info)
  DEPOT_DEPS_NAME = merged
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
194
195
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, the two boundary
      # values only receive the remaining fractional weight.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[len(data_set) - 1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  if not data_set:
    # Preserve the historical contract: this case previously raised
    # TypeError from reduce() on an empty sequence.
    raise TypeError('Data set empty after discarding values.')

  # sum() replaces the old reduce(lambda ...) — same addition order, but
  # works on Python 3 as well (reduce is no longer a builtin there).
  return sum(float(v) for v in data_set) / kept_weight
239
240
def CalculateMean(values):
  """Returns the arithmetic mean, computed as a 0%-truncated mean."""
  return CalculateTruncatedMean(values, truncate_percent=0.0)
244
245
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  The confidence is based on how distinct the "good" and "bad" values are,
  and how noisy the results are: it is the quotient of the distance between
  the closest means across the two groups and the sum of the standard
  deviations of the two groups, clamped to [0, 1] and scaled to a percent.

  TODO(qyearsley): Replace this confidence function with a function that
      uses a Student's t-test. The confidence would be (1 - p-value), where
      p-value is the probability of obtaining the given a set of good and bad
      values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # Distance between the closest edges of the two groups of per-list means.
  good_means = [CalculateMean(results) for results in good_results_lists]
  bad_means = [CalculateMean(results) for results in bad_results_lists]
  dist_between_groups = min(
      math.fabs(max(bad_means) - min(good_means)),
      math.fabs(min(bad_means) - max(good_means)))

  # Noise estimate: sum of the standard deviations of the flattened groups.
  all_good_values = sum(good_results_lists, [])
  all_bad_values = sum(bad_results_lists, [])
  stddev_sum = (CalculateStandardDeviation(all_good_values) +
                CalculateStandardDeviation(all_bad_values))

  # Guard against division by zero when the results are perfectly stable.
  score = dist_between_groups / max(0.0001, stddev_sum)
  return int(min(1.0, max(score, 0.0)) * 100.0)
285
286
def CalculateStandardDeviation(values):
  """Returns the sample standard deviation of the given list of values."""
  if len(values) == 1:
    return 0.0

  mean = CalculateMean(values)
  # Sample variance: sum of squared deviations over (n - 1).
  deviations = [float(v) - mean for v in values]
  variance = sum(d * d for d in deviations) / (len(values) - 1)
  return math.sqrt(variance)
299
300
def CalculateRelativeChange(before, after):
  """Returns the change between the two values, relative to the first.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  # Equal values (including both zero) are a 0% change by definition.
  if before == after:
    return 0.0
  # A change from zero has no meaningful relative magnitude.
  if before == 0:
    return float('nan')
  delta = after - before
  return math.fabs(delta / before)
322
323
def CalculatePooledStandardError(work_sets):
  """Returns the pooled standard error across several sample sets.

  Args:
    work_sets: A list of lists of sample values.

  Returns:
    The pooled standard error as a float, or 0.0 when there are not enough
    samples (every set has at most one element).
  """
  weighted_variance_sum = 0.0
  degrees_of_freedom = 0.0
  inverse_size_sum = 0.0

  for sample in work_sets:
    sample_std_dev = CalculateStandardDeviation(sample)
    weighted_variance_sum += (len(sample) - 1) * sample_std_dev ** 2
    degrees_of_freedom += len(sample) - 1
    inverse_size_sum += 1.0 / len(sample)

  if degrees_of_freedom:
    pooled_std_dev = math.sqrt(weighted_variance_sum / degrees_of_freedom)
    return pooled_std_dev * math.sqrt(inverse_size_sum)
  return 0.0
338
339
def CalculateStandardError(values):
  """Returns the standard error of the mean of the given list of values."""
  # With zero or one sample there is no spread to estimate.
  if len(values) <= 1:
    return 0.0
  return CalculateStandardDeviation(values) / math.sqrt(len(values))
348
349
def IsStringFloat(string_to_check):
  """Checks whether the given string can be converted to a float.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
366
367
def IsStringInt(string_to_check):
  """Checks whether the given string can be converted to an integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
383
384
def IsWindows():
  """Checks whether the script is running on Windows (including cygwin).

  Returns:
    True if running on Windows.
  """
  return sys.platform.startswith('win') or sys.platform == 'cygwin'
392
393
def Is64BitWindows():
  """Returns whether the underlying Windows OS is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit (or the architecture
    environment variables are not set at all, e.g. on non-Windows).
  """
  # PROCESSOR_ARCHITEW6432 is set only for a 32-bit process running under
  # WoW64 on a 64-bit OS; when present it overrides PROCESSOR_ARCHITECTURE.
  # Using .get() avoids the KeyError the old code raised when
  # PROCESSOR_ARCHITECTURE was not set at all.
  platform = os.environ.get('PROCESSOR_ARCHITEW6432',
                            os.environ.get('PROCESSOR_ARCHITECTURE', ''))

  # NOTE(review): 'I64' looks like it was meant to be 'IA64' (Itanium);
  # kept as-is to preserve existing behavior — confirm before changing.
  return platform in ['AMD64', 'I64']
408
409
def IsLinux():
  """Checks whether the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  # Matches 'linux2'/'linux3' (Python 2) as well as plain 'linux'.
  return sys.platform[:5] == 'linux'
417
418
def IsMac():
  """Checks whether the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform[:6] == 'darwin'
426
427
def GetSHA1HexDigest(contents):
  """Returns the SHA-1 hex digest of the given contents."""
  digest = hashlib.sha1()
  digest.update(contents)
  return digest.hexdigest()
431
432
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Returns the platform string used in archive paths."""
    if IsWindows():
      # Build archives for x64 are still stored with the 'win32' suffix
      # (see chromium_utils.PlatformName()), hence both branches return it.
      if Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  revision = build_revision
  if patch_sha:
    revision = '%s_%s' % (revision, patch_sha)
  return '%s_%s.zip' % (base_name, revision)
455
456
def GetRemoteBuildPath(build_revision, target_arch='ia32', patch_sha=None):
  """Computes the url to download the build from."""
  def GetGSRootFolderName():
    """Returns the Google Cloud Storage root folder name for this platform."""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  # Compute the archive name first, matching the original evaluation order.
  archive_name = GetZipFileName(build_revision, target_arch, patch_sha)
  return '%s/%s' % (GetGSRootFolderName(), archive_name)
474
475
476def FetchFromCloudStorage(bucket_name, source_path, destination_path):
477  """Fetches file(s) from the Google Cloud Storage.
478
479  Args:
480    bucket_name: Google Storage bucket name.
481    source_path: Source file path.
482    destination_path: Destination file path.
483
484  Returns:
485    Downloaded file path if exisits, otherwise None.
486  """
487  target_file = os.path.join(destination_path, os.path.basename(source_path))
488  try:
489    if cloud_storage.Exists(bucket_name, source_path):
490      print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path)
491      cloud_storage.Get(bucket_name, source_path, destination_path)
492      if os.path.exists(target_file):
493        return target_file
494    else:
495      print ('File gs://%s/%s not found in cloud storage.' % (
496          bucket_name, source_path))
497  except Exception as e:
498    print 'Something went wrong while fetching file from cloud: %s' % e
499    if os.path.exists(target_file):
500      os.remove(target_file)
501  return None
502
503
504# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists or was created; False on any other OS error.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:  # 'as' syntax, consistent with except clauses below.
    # An already-existing directory is fine; anything else is a failure.
    if e.errno != errno.EEXIST:
      return False
  return True
514
515
516# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """Extracts a zip archive into the given output directory.

  Args:
    filename: Path of the zip archive to extract.
    output_dir: Directory to extract into; created if needed.
    verbose: If True, prints each extracted name (python-zipfile path only).

  Raises:
    IOError: The external unzip command failed.
  """
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier then trying to do that with ZipInfo options.
  #
  # The Mac Version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fallback to the python zip module
  # on Mac if the filesize is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((IsMac() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or IsLinux()):
    unzip_cmd = ['unzip', '-o']
  elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert IsWindows() or IsMac()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
      if IsMac():
        # Restore permission bits.
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16L)
559
560
def RunProcess(command):
  """Runs an arbitrary command and waits for it to finish.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # shell=True on Windows so the executable is resolved via PATH.
  return subprocess.call(command, shell=IsWindows())
575
576
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  if cwd:
    original_cwd = os.getcwd()
    os.chdir(cwd)

  try:
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindows()
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Restore the original working directory even when Popen/communicate
    # raises; previously an exception left the whole process in `cwd`.
    if cwd:
      os.chdir(original_cwd)

  return (output, proc.returncode)
606
607
def RunGit(command, cwd=None):
  """Runs a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  return RunProcessAndRetrieveOutput(['git'] + command, cwd=cwd)
621
622
def CheckRunGit(command, cwd=None):
  """Runs a git subcommand, asserting that its return code is zero.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    The output of the git command.
  """
  output, return_code = RunGit(command, cwd=cwd)
  assert not return_code, (
      'An error occurred while running "git %s"' % ' '.join(command))
  return output
638
639
def SetBuildSystemDefault(build_system, use_goma):
  """Sets up environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.
    use_goma: If True, appends 'use_goma=1' to GYP_DEFINES.

  Raises:
    RuntimeError: The given build system is not supported.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    # Add ninja to the generator list only if it isn't already there.
    if not gyp_var or 'ninja' not in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'

  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)

  if use_goma:
    os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', ''),
                                           'use_goma=1')
669
670
def BuildWithMake(threads, targets, build_type='Release'):
  """Runs a make build of the given targets; returns True on success."""
  cmd = ['make', 'BUILDTYPE=%s' % build_type]
  if threads:
    cmd.append('-j%d' % threads)
  return not RunProcess(cmd + targets)
682
683
def BuildWithNinja(threads, targets, build_type='Release'):
  """Runs a ninja build of the given targets; returns True on success."""
  cmd = ['ninja', '-C', os.path.join('out', build_type)]
  if threads:
    cmd.append('-j%d' % threads)
  return not RunProcess(cmd + targets)
695
696
def BuildWithVisualStudio(targets, build_type='Release'):
  """Runs a Visual Studio build of the given targets.

  Returns:
    True if the build succeeded.
  """
  devenv_path = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  solution_path = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')

  cmd = [devenv_path, '/build', build_type, solution_path]
  for target in targets:
    cmd.extend(['/Project', target])

  return not RunProcess(cmd)
709
710
def WriteStringToFile(text, file_name):
  """Writes the given contents to the named file in binary mode.

  Args:
    text: The contents to write.
    file_name: Path of the file to (over)write.

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError as e:
    # Include the underlying error so failures are diagnosable (the original
    # message dropped it).
    raise RuntimeError('Error writing to file [%s]: %s' % (file_name, e))
717
718
def ReadStringFromFile(file_name):
  """Returns the entire contents of the named file.

  Args:
    file_name: Path of the file to read.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError as e:
    # Include the underlying error so failures are diagnosable (the original
    # message dropped it).
    raise RuntimeError('Error reading file [%s]: %s' % (file_name, e))
725
726
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to unix-style paths.

  Only the '--- ' / '+++ ' header lines are rewritten; hunk content is
  left untouched. Returns None when diff_text is empty or None.
  """
  if not diff_text:
    return None
  fixed_lines = []
  for line in diff_text.split('\n'):
    if line.startswith(('--- ', '+++ ')):
      line = line.replace('\\', '/')
    fixed_lines.append(line)
  return '\n'.join(fixed_lines)
737
738
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
        opts: Options parsed from command line.

    Raises:
        RuntimeError: The build environment could not be set up.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference, opts.use_goma)
    else:
      if not opts.build_preference:
        # Bug fix: os.getenv returns None when GYP_GENERATORS is unset, and
        # "'ninja' in None" raises TypeError; default to an empty string.
        if 'ninja' in os.getenv('GYP_GENERATORS', ''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference, opts.use_goma)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Returns the Builder subclass matching opts.target_platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    """Builds the tested targets; implemented by subclasses."""
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the build output directory; implemented by subclasses."""
    raise NotImplementedError()
788
789
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""

  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds the chromium_builder_perf target using the configured
    build system.

    Args:
        depot: Current depot being bisected.
        opts: The options parsed from the command line.

    Returns:
        True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # Goma allows a much higher degree of build parallelism.
    threads = 64 if opts.use_goma else None

    if opts.build_preference == 'make':
      return BuildWithMake(threads, targets, opts.target_build_type)
    if opts.build_preference == 'ninja':
      return BuildWithNinja(threads, targets, opts.target_build_type)
    if opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      return BuildWithVisualStudio(targets, opts.target_build_type)
    assert False, 'No build system defined.'
    return False

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

      Assumes that the current working directory is the checkout root.
    """
    checkout = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(checkout, 'out')
    if IsMac():
      return os.path.join(checkout, 'xcodebuild')
    if IsWindows():
      return os.path.join(checkout, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
837
838
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""

  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    """Returns the ninja targets built for android bisects."""
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools.

    Args:
        depot: Current depot being bisected.
        opts: The options parsed from the command line.

    Returns:
        True if build was successful.
    """
    # Goma allows a much higher degree of build parallelism.
    threads = 64 if opts.use_goma else None

    if opts.build_preference == 'ninja':
      return BuildWithNinja(threads, self._GetTargets(),
                            opts.target_build_type)
    assert False, 'No build system defined.'
    return False
870
871
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build android's chrome."""

  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    """Adds chrome_apk to the standard android target list."""
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
879
880
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
        opts: Program options containing cros_board and cros_remote_ip.

    Returns:
        True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      # ssh requires private keys to be readable by the owner only.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
      os.chmod(CROS_TEST_KEY_PATH, 0600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError, e:
      # chmod can fail if the key files are missing; treat as imaging failure.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
        opts: Program options containing cros_board.
        depot: The depot being bisected.

    Returns:
        True if successful.
    """
    cmd = [CROS_SDK_PATH]

    # When bisecting a depot other than cros itself, build chrome from the
    # local checkout (one directory above the cwd) rather than a release.
    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
        '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
        opts: Program options containing cros_board.
        depot: The depot being bisected.

    Returns:
        True if successful.
    """
    cmd = [CROS_SDK_PATH]

    # Same local-chrome plumbing as BuildPackages.
    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
        '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Runs the full cros pipeline: build packages, build a test image, then
    install the image on the remote target.

    Args:
        depot: Current depot being bisected.
        opts: The options parsed from the command line.

    Returns:
        True if build was successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
978
979
class SourceControl(object):
  """Abstraction over the source control system used for chromium.

  For now only git is supported, but in the future the svn workflow could be
  added as well.
  """

  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    gclient_args = ['sync', '--verbose', '--reset', '--force',
                    '--delete_unversioned_trees', '--nohooks',
                    '--revision', revision]
    return bisect_utils.RunGClient(gclient_args)

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
1012
1013
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """

  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    self.opts = opts

  def IsGit(self):
    # This implementation is git-backed; used by callers to pick workflows.
    return True

  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    # 'git log A..B' excludes A itself; append it so the range is inclusive.
    revision_hash_list = log_output.split()
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: None to check out with git directly, otherwise 'gclient'
        or 'repo' to sync via that tool.

    Returns:
      True if successful.

    Raises:
      ValueError: An unsupported sync_client value was supplied.
    """
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)
    else:
      # Previously an unknown sync_client fell through and raised a NameError
      # on the undefined 'results'; fail with a clear message instead.
      raise ValueError('Unsupported sync_client: %s' % sync_client)

    return not results

  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
        resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
        to a git hash. If the value is negative, the function will search
        backwards chronologically, otherwise it will search forward.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      if not IsStringInt(revision_to_check):
        # Already a git hash (or at least not an svn CL number).
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        # Each svn commit mirrored into git carries a git-svn-id trailer; grep
        # the log for it to map the CL number to a SHA1.
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        # The assert above guarantees return_code is 0, so the previous
        # redundant 'if not return_code:' check was dropped.
        log_output = log_output.strip()
        if log_output:
          git_revision = log_output
          break

      return git_revision
    else:
      if IsStringInt(revision_to_check):
        return int(revision_to_check)
      else:
        # NOTE(review): this chdir's into the chromiumos overlay but still
        # passes the original cwd to git — confirm which directory the log
        # is meant to run in.
        cwd = os.getcwd()
        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
            'chromiumos-overlay'))
        pattern = CROS_VERSION_PATTERN % revision_to_check
        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

        git_revision = None

        log_output = CheckRunGit(cmd, cwd=cwd)
        if log_output:
          # For cros the "revision" is the commit's unix timestamp.
          git_revision = int(log_output.strip())
        os.chdir(cwd)

        return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed or gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == 'master'

  def SVNFindRev(self, revision, cwd=None):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """

    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd, cwd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision, cwd=None):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.
    Returns:
      A dict in the following format:
      {
        'author': %s,
        'email': %s,
        'date': %s,
        'subject': %s,
        'body': %s,
      }
    """
    commit_info = {}

    # %aN/%aE are the author's name and email. The previous %cN/%cE formats
    # returned the *committer*, which did not match the 'author'/'email'
    # keys this dict promises.
    formats = ['%aN', '%aE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    for fmt, target in zip(formats, targets):
      cmd = ['log', '--format=%s' % fmt, '-1', revision]
      output = CheckRunGit(cmd, cwd=cwd)
      commit_info[target] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', file_name])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
        filename: Name of file.
        revision_start: Start of revision range.
        revision_end: End of revision range.

    Returns:
        Returns a list of commits that touched this file.
    """
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]
1235
1236
1237class BisectPerformanceMetrics(object):
1238  """This class contains functionality to perform a bisection of a range of
1239  revisions to narrow down where performance regressions may have occurred.
1240
1241  The main entry-point is the Run method.
1242  """
1243
1244  def __init__(self, source_control, opts):
1245    super(BisectPerformanceMetrics, self).__init__()
1246
1247    self.opts = opts
1248    self.source_control = source_control
1249    self.src_cwd = os.getcwd()
1250    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
1251    self.depot_cwd = {}
1252    self.cleanup_commands = []
1253    self.warnings = []
1254    self.builder = Builder.FromOpts(opts)
1255
1256    # This always starts true since the script grabs latest first.
1257    self.was_blink = True
1258
1259    for d in DEPOT_NAMES:
1260      # The working directory of each depot is just the path to the depot, but
1261      # since we're already in 'src', we can skip that part.
1262
1263      self.depot_cwd[d] = os.path.join(
1264          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1265
1266  def PerformCleanup(self):
1267    """Performs cleanup when script is finished."""
1268    os.chdir(self.src_cwd)
1269    for c in self.cleanup_commands:
1270      if c[0] == 'mv':
1271        shutil.move(c[1], c[2])
1272      else:
1273        assert False, 'Invalid cleanup command.'
1274
1275  def GetRevisionList(self, depot, bad_revision, good_revision):
1276    """Retrieves a list of all the commits between the bad revision and
1277    last known good revision."""
1278
1279    revision_work_list = []
1280
1281    if depot == 'cros':
1282      revision_range_start = good_revision
1283      revision_range_end = bad_revision
1284
1285      cwd = os.getcwd()
1286      self.ChangeToDepotWorkingDirectory('cros')
1287
1288      # Print the commit timestamps for every commit in the revision time
1289      # range. We'll sort them and bisect by that. There is a remote chance that
1290      # 2 (or more) commits will share the exact same timestamp, but it's
1291      # probably safe to ignore that case.
1292      cmd = ['repo', 'forall', '-c',
1293          'git log --format=%%ct --before=%d --after=%d' % (
1294          revision_range_end, revision_range_start)]
1295      (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1296
1297      assert not return_code, 'An error occurred while running'\
1298                              ' "%s"' % ' '.join(cmd)
1299
1300      os.chdir(cwd)
1301
1302      revision_work_list = list(set(
1303          [int(o) for o in output.split('\n') if IsStringInt(o)]))
1304      revision_work_list = sorted(revision_work_list, reverse=True)
1305    else:
1306      cwd = self._GetDepotDirectory(depot)
1307      revision_work_list = self.source_control.GetRevisionList(bad_revision,
1308          good_revision, cwd=cwd)
1309
1310    return revision_work_list
1311
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a V8 trunk revision to its bleeding_edge counterpart, if possible.

    Args:
      revision: A git hash on the V8 trunk branch.

    Returns:
      The bleeding_edge git hash the trunk revision corresponds to, or None
      when no reliable mapping exists.
    """
    svn_revision = self.source_control.SVNFindRev(revision)

    if IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      #  1. trunk revision N has description "Version X.Y.Z"
      #  2. bleeding_edge revision (N-1) has description "Prepare push to
      #     trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            # Subject didn't match the expected format; fall through to the
            # older "Prepare push to trunk" heuristic below.
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to"
          git_revision = self.source_control.ResolveToRevision(
              int(svn_revision) - 1, 'v8_bleeding_edge', -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            # Only trust the mapping when the candidate commit's subject
            # confirms it is a push-to-trunk preparation commit.
            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None
1365
1366  def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1367    cwd = self._GetDepotDirectory('v8')
1368    cmd = ['log', '--format=%ct', '-1', revision]
1369    output = CheckRunGit(cmd, cwd=cwd)
1370    commit_time = int(output)
1371    commits = []
1372
1373    if search_forward:
1374      cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1375          'origin/master']
1376      output = CheckRunGit(cmd, cwd=cwd)
1377      output = output.split()
1378      commits = output
1379      commits = reversed(commits)
1380    else:
1381      cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1382          'origin/master']
1383      output = CheckRunGit(cmd, cwd=cwd)
1384      output = output.split()
1385      commits = output
1386
1387    bleeding_edge_revision = None
1388
1389    for c in commits:
1390      bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1391      if bleeding_edge_revision:
1392        break
1393
1394    return bleeding_edge_revision
1395
1396  def _ParseRevisionsFromDEPSFileManually(self, deps_file_contents):
1397    """Manually parses the vars section of the DEPS file to determine
1398    chromium/blink/etc... revisions.
1399
1400    Returns:
1401      A dict in the format {depot:revision} if successful, otherwise None.
1402    """
1403    # We'll parse the "vars" section of the DEPS file.
1404    rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
1405    re_results = rxp.search(deps_file_contents)
1406    locals = {}
1407
1408    if not re_results:
1409      return None
1410
1411    # We should be left with a series of entries in the vars component of
1412    # the DEPS file with the following format:
1413    # 'depot_name': 'revision',
1414    vars_body = re_results.group('vars_body')
1415    rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1416                     re.MULTILINE)
1417    re_results = rxp.findall(vars_body)
1418
1419    return dict(re_results)
1420
1421  def _ParseRevisionsFromDEPSFile(self, depot):
1422    """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1423    be needed if the bisect recurses into those depots later.
1424
1425    Args:
1426      depot: Depot being bisected.
1427
1428    Returns:
1429      A dict in the format {depot:revision} if successful, otherwise None.
1430    """
1431    try:
1432      deps_data = {'Var': lambda _: deps_data["vars"][_],
1433                   'From': lambda *args: None
1434                  }
1435      execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
1436      deps_data = deps_data['deps']
1437
1438      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1439      results = {}
1440      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
1441        if (depot_data.get('platform') and
1442            depot_data.get('platform') != os.name):
1443          continue
1444
1445        if (depot_data.get('recurse') and depot in depot_data.get('from')):
1446          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
1447          src_dir = deps_data.get(depot_data_src)
1448          if src_dir:
1449            self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
1450                                                      depot_data_src[4:])
1451            re_results = rxp.search(src_dir)
1452            if re_results:
1453              results[depot_name] = re_results.group('revision')
1454            else:
1455              warning_text = ('Couldn\'t parse revision for %s while bisecting '
1456                              '%s' % (depot_name, depot))
1457              if not warning_text in self.warnings:
1458                self.warnings.append(warning_text)
1459          else:
1460            results[depot_name] = None
1461      return results
1462    except ImportError:
1463      deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
1464      parse_results = self._ParseRevisionsFromDEPSFileManually(
1465          deps_file_contents)
1466      results = {}
1467      for depot_name, depot_revision in parse_results.iteritems():
1468        depot_revision = depot_revision.strip('@')
1469        print depot_name, depot_revision
1470        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
1471          if (current_data.has_key('deps_var') and
1472              current_data['deps_var'] == depot_name):
1473            src_name = current_name
1474            results[src_name] = depot_revision
1475            break
1476      return results
1477
1478  def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
1479    """Parses the DEPS file to determine WebKit/v8/etc... versions.
1480
1481    Returns:
1482      A dict in the format {depot:revision} if successful, otherwise None.
1483    """
1484    cwd = os.getcwd()
1485    self.ChangeToDepotWorkingDirectory(depot)
1486
1487    results = {}
1488
1489    if depot == 'chromium' or depot == 'android-chrome':
1490      results = self._ParseRevisionsFromDEPSFile(depot)
1491      os.chdir(cwd)
1492    elif depot == 'cros':
1493      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1494             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1495             CROS_CHROMEOS_PATTERN]
1496      (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1497
1498      assert not return_code, 'An error occurred while running' \
1499                              ' "%s"' % ' '.join(cmd)
1500
1501      if len(output) > CROS_CHROMEOS_PATTERN:
1502        output = output[len(CROS_CHROMEOS_PATTERN):]
1503
1504      if len(output) > 1:
1505        output = output.split('_')[0]
1506
1507        if len(output) > 3:
1508          contents = output.split('.')
1509
1510          version = contents[2]
1511
1512          if contents[3] != '0':
1513            warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
1514                (version, contents[3], version)
1515            if not warningText in self.warnings:
1516              self.warnings.append(warningText)
1517
1518          cwd = os.getcwd()
1519          self.ChangeToDepotWorkingDirectory('chromium')
1520          return_code = CheckRunGit(['log', '-1', '--format=%H',
1521              '--author=chrome-release@google.com', '--grep=to %s' % version,
1522              'origin/master'])
1523          os.chdir(cwd)
1524
1525          results['chromium'] = output.strip()
1526    elif depot == 'v8':
1527      # We can't try to map the trunk revision to bleeding edge yet, because
1528      # we don't know which direction to try to search in. Have to wait until
1529      # the bisect has narrowed the results down to 2 v8 rolls.
1530      results['v8_bleeding_edge'] = None
1531
1532    return results
1533
1534  def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1535    """Backs up or restores build output directory based on restore argument.
1536
1537    Args:
1538      restore: Indicates whether to restore or backup. Default is False(Backup)
1539      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1540
1541    Returns:
1542      Path to backup or restored location as string. otherwise None if it fails.
1543    """
1544    build_dir = os.path.abspath(
1545        self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1546    source_dir = os.path.join(build_dir, build_type)
1547    destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1548    if restore:
1549      source_dir, destination_dir = destination_dir, source_dir
1550    if os.path.exists(source_dir):
1551      RmTreeAndMkDir(destination_dir, skip_makedir=True)
1552      shutil.move(source_dir, destination_dir)
1553      return destination_dir
1554    return None
1555
1556  def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
1557                                 patch_sha, out_dir):
1558    """Checks and downloads build archive for a given revision.
1559
1560    Checks for build archive with Git hash or SVN revision. If either of the
1561    file exists, then downloads the archive file.
1562
1563    Args:
1564      revision: A Git hash revision.
1565      gs_bucket: Cloud storage bucket name
1566      target_arch: 32 or 64 bit build target
1567      patch: A DEPS patch (used while bisecting 3rd party repositories).
1568      out_dir: Build output directory where downloaded file is stored.
1569
1570    Returns:
1571      Downloaded archive file path if exists, otherwise None.
1572    """
1573    # Source archive file path on cloud storage using Git revision.
1574    source_file = GetRemoteBuildPath(revision, target_arch, patch_sha)
1575    downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1576    if not downloaded_archive:
1577      # Get SVN revision for the given SHA.
1578      svn_revision = self.source_control.SVNFindRev(revision)
1579      if svn_revision:
1580        # Source archive file path on cloud storage using SVN revision.
1581        source_file = GetRemoteBuildPath(svn_revision, target_arch, patch_sha)
1582        return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1583    return downloaded_archive
1584
1585  def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1586    """Downloads the build archive for the given revision.
1587
1588    Args:
1589      revision: The Git revision to download or build.
1590      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1591      patch: A DEPS patch (used while bisecting 3rd party repositories).
1592
1593    Returns:
1594      True if download succeeds, otherwise False.
1595    """
1596    patch_sha = None
1597    if patch:
1598      # Get the SHA of the DEPS changes patch.
1599      patch_sha = GetSHA1HexDigest(patch)
1600
1601      # Update the DEPS changes patch with a patch to create a new file named
1602      # 'DEPS.sha' and add patch_sha evaluated above to it.
1603      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1604
1605    # Get Build output directory
1606    abs_build_dir = os.path.abspath(
1607        self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1608
1609    fetch_build_func = lambda: self.GetBuildArchiveForRevision(
1610      revision, self.opts.gs_bucket, self.opts.target_arch,
1611      patch_sha, abs_build_dir)
1612
1613    # Downloaded archive file path, downloads build archive for given revision.
1614    downloaded_file = fetch_build_func()
1615
1616    # When build archive doesn't exists, post a build request to tryserver
1617    # and wait for the build to be produced.
1618    if not downloaded_file:
1619      downloaded_file = self.PostBuildRequestAndWait(
1620          revision, fetch_build=fetch_build_func, patch=patch)
1621      if not downloaded_file:
1622        return False
1623
1624    # Generic name for the archive, created when archive file is extracted.
1625    output_dir = os.path.join(
1626        abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1627    # Unzip build archive directory.
1628    try:
1629      RmTreeAndMkDir(output_dir, skip_makedir=True)
1630      ExtractZip(downloaded_file, abs_build_dir)
1631      if os.path.exists(output_dir):
1632        self.BackupOrRestoreOutputdirectory(restore=False)
1633        # Build output directory based on target(e.g. out/Release, out/Debug).
1634        target_build_output_dir = os.path.join(abs_build_dir, build_type)
1635        print 'Moving build from %s to %s' % (
1636            output_dir, target_build_output_dir)
1637        shutil.move(output_dir, target_build_output_dir)
1638        return True
1639      raise IOError('Missing extracted folder %s ' % output_dir)
1640    except Exception as e:
1641      print 'Somewthing went wrong while extracting archive file: %s' % e
1642      self.BackupOrRestoreOutputdirectory(restore=True)
1643      # Cleanup any leftovers from unzipping.
1644      if os.path.exists(output_dir):
1645        RmTreeAndMkDir(output_dir, skip_makedir=True)
1646    finally:
1647      # Delete downloaded archive
1648      if os.path.exists(downloaded_file):
1649        os.remove(downloaded_file)
1650    return False
1651
  def WaitUntilBuildIsReady(self, fetch_build, bot_name, builder_host,
                            builder_port, build_request_id, max_timeout):
    """Waits until build is produced by bisect builder on tryserver.

    Args:
      fetch_build: Function to check and download build from cloud storage.
      bot_name: Builder bot name on tryserver.
      builder_host: Tryserver hostname.
      builder_port: Tryserver port.
      build_request_id: A unique ID of the build request posted to tryserver.
      max_timeout: Maximum time to wait for the build.

    Returns:
       A (file_path, message) tuple: the downloaded archive file path (or None
       on failure/timeout) and a human-readable status message.
    """
    # Build number on the tryserver.
    build_num = None
    # Interval to check build on cloud storage.
    poll_interval = 60
    # Interval to check build status on tryserver.
    status_check_interval = 600
    last_status_check = time.time()
    start_time = time.time()
    while True:
      # Checks for build on gs://chrome-perf and download if exists.
      res = fetch_build()
      if res:
        return (res, 'Build successfully found')
      elapsed_status_check = time.time() - last_status_check
      # To avoid overloading tryserver with status check requests, we check
      # build status for every 10 mins.
      if elapsed_status_check > status_check_interval:
        last_status_check = time.time()
        if not build_num:
          # Get the build number on tryserver for the current build.
          build_num = bisect_builder.GetBuildNumFromBuilder(
              build_request_id, bot_name, builder_host, builder_port)
        # Check the status of build using the build number.
        # Note: Build is treated as PENDING if build number is not found
        # on the tryserver.
        build_status, status_link = bisect_builder.GetBuildStatus(
            build_num, bot_name, builder_host, builder_port)
        if build_status == bisect_builder.FAILED:
          return (None, 'Failed to produce build, log: %s' % status_link)
      elapsed_time = time.time() - start_time
      if elapsed_time > max_timeout:
        return (None, 'Timed out: %ss without build' % max_timeout)

      print 'Time elapsed: %ss without build.' % elapsed_time
      time.sleep(poll_interval)
1702
  def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
    """POSTs the build request job to the tryserver instance.

    A try job build request is posted to tryserver.chromium.perf master,
    and waits for the binaries to be produced and archived on cloud storage.
    Once the build is ready and stored onto cloud, build archive is downloaded
    into the output folder.

    Args:
      revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None.

    Raises:
      RuntimeError: The git hash could not be mapped to an SVN revision.
    """
    # Get SVN revision for the given SHA.
    svn_revision = self.source_control.SVNFindRev(revision)
    if not svn_revision:
      raise RuntimeError(
          'Failed to determine SVN revision for %s' % revision)

    def GetBuilderNameAndBuildTime(target_arch='ia32'):
      """Gets builder bot name and buildtime in seconds based on platform."""
      # Bot names should match the one listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if IsWindows():
        # NOTE(review): both Windows branches return the same bot name; the
        # x64 case presumably should use a dedicated 64-bit builder — confirm
        # against tryserver.chromium's master.cfg.
        if Is64BitWindows() and target_arch == 'x64':
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if IsLinux():
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if IsMac():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
    if not fetch_build:
      return False

    bot_name, build_timeout = GetBuilderNameAndBuildTime(self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to tryserver builders.
    # This ID is added to "Reason" property in build's json.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (svn_revision, patch, time.time()))

    # Creates a try job description.
    job_args = {'host': builder_host,
                'port': builder_port,
                'revision': 'src@%s' % svn_revision,
                'bot': bot_name,
                'name': build_request_id
               }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if bisect_builder.PostTryJob(job_args):
      target_file, error_msg = self.WaitUntilBuildIsReady(fetch_build,
                                                          bot_name,
                                                          builder_host,
                                                          builder_port,
                                                          build_request_id,
                                                          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, svn_revision)
        return None
      return target_file
    print 'Failed to post build request for revision: [%s]' % svn_revision
    return None
1774
1775  def IsDownloadable(self, depot):
1776    """Checks if build is downloadable based on target platform and depot."""
1777    if self.opts.target_platform in ['chromium'] and self.opts.gs_bucket:
1778      return (depot == 'chromium' or
1779              'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1780              'v8' in DEPOT_DEPS_NAME[depot]['from'])
1781    return False
1782
1783  def UpdateDeps(self, revision, depot, deps_file):
1784    """Updates DEPS file with new revision of dependency repository.
1785
1786    This method search DEPS for a particular pattern in which depot revision
1787    is specified (e.g "webkit_revision": "123456"). If a match is found then
1788    it resolves the given git hash to SVN revision and replace it in DEPS file.
1789
1790    Args:
1791      revision: A git hash revision of the dependency repository.
1792      depot: Current depot being bisected.
1793      deps_file: Path to DEPS file.
1794
1795    Returns:
1796      True if DEPS file is modified successfully, otherwise False.
1797    """
1798    if not os.path.exists(deps_file):
1799      return False
1800
1801    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1802    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1803    if not deps_var:
1804      print 'DEPS update not supported for Depot: %s', depot
1805      return False
1806
1807    # Hack to Angle repository because, in DEPS file "vars" dictionary variable
1808    # contains "angle_revision" key that holds git hash instead of SVN revision.
1809    # And sometime "angle_revision" key is not specified in "vars" variable,
1810    # in such cases check "deps" dictionary variable that matches
1811    # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1812    if depot == 'angle':
1813      return self.UpdateDEPSForAngle(revision, depot, deps_file)
1814
1815    try:
1816      deps_contents = ReadStringFromFile(deps_file)
1817      # Check whether the depot and revision pattern in DEPS file vars
1818      # e.g. for webkit the format is "webkit_revision": "12345".
1819      deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
1820                                 re.MULTILINE)
1821      match = re.search(deps_revision, deps_contents)
1822      if match:
1823        svn_revision = self.source_control.SVNFindRev(
1824            revision, self._GetDepotDirectory(depot))
1825        if not svn_revision:
1826          print 'Could not determine SVN revision for %s' % revision
1827          return False
1828        # Update the revision information for the given depot
1829        new_data = re.sub(deps_revision, str(svn_revision), deps_contents)
1830
1831        # For v8_bleeding_edge revisions change V8 branch in order
1832        # to fetch bleeding edge revision.
1833        if depot == 'v8_bleeding_edge':
1834          new_data = self.UpdateV8Branch(new_data)
1835          if not new_data:
1836            return False
1837        # Write changes to DEPS file
1838        WriteStringToFile(new_data, deps_file)
1839        return True
1840    except IOError, e:
1841      print 'Something went wrong while updating DEPS file. [%s]' % e
1842    return False
1843
1844  def UpdateV8Branch(self, deps_content):
1845    """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1846
1847    Check for "v8_branch" in DEPS file if exists update its value
1848    with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1849    variable from DEPS revision 254916, therefore check for "src/v8":
1850    <v8 source path> in DEPS in order to support prior DEPS revisions
1851    and update it.
1852
1853    Args:
1854      deps_content: DEPS file contents to be modified.
1855
1856    Returns:
1857      Modified DEPS file contents as a string.
1858    """
1859    new_branch = r'branches/bleeding_edge'
1860    v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
1861    if re.search(v8_branch_pattern, deps_content):
1862      deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
1863    else:
1864      # Replaces the branch assigned to "src/v8" key in DEPS file.
1865      # Format of "src/v8" in DEPS:
1866      # "src/v8":
1867      #    (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1868      # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1869      v8_src_pattern = re.compile(
1870          r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
1871      if re.search(v8_src_pattern, deps_content):
1872        deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
1873    return deps_content
1874
1875  def UpdateDEPSForAngle(self, revision, depot, deps_file):
1876    """Updates DEPS file with new revision for Angle repository.
1877
1878    This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1879    variable contains "angle_revision" key that holds git hash instead of
1880    SVN revision.
1881
1882    And sometimes "angle_revision" key is not specified in "vars" variable,
1883    in such cases check "deps" dictionary variable that matches
1884    angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1885    """
1886    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1887    try:
1888      deps_contents = ReadStringFromFile(deps_file)
1889      # Check whether the depot and revision pattern in DEPS file vars variable
1890      # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1891      angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1892                                     deps_var, re.MULTILINE)
1893      match = re.search(angle_rev_pattern % deps_var, deps_contents)
1894      if match:
1895        # Update the revision information for the given depot
1896        new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1897      else:
1898        # Check whether the depot and revision pattern in DEPS file deps
1899        # variable. e.g.,
1900        # "src/third_party/angle": Var("chromium_git") +
1901        # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1902        angle_rev_pattern = re.compile(
1903            r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
1904        match = re.search(angle_rev_pattern, deps_contents)
1905        if not match:
1906          print 'Could not find angle revision information in DEPS file.'
1907          return False
1908        new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1909      # Write changes to DEPS file
1910      WriteStringToFile(new_data, deps_file)
1911      return True
1912    except IOError, e:
1913      print 'Something went wrong while updating DEPS file, %s' % e
1914    return False
1915
1916  def CreateDEPSPatch(self, depot, revision):
1917    """Modifies DEPS and returns diff as text.
1918
1919    Args:
1920      depot: Current depot being bisected.
1921      revision: A git hash revision of the dependency repository.
1922
1923    Returns:
1924      A tuple with git hash of chromium revision and DEPS patch text.
1925    """
1926    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
1927    if not os.path.exists(deps_file_path):
1928      raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
1929    # Get current chromium revision (git hash).
1930    chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
1931    if not chromium_sha:
1932      raise RuntimeError('Failed to determine Chromium revision for %s' %
1933                         revision)
1934    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1935        'v8' in DEPOT_DEPS_NAME[depot]['from']):
1936      # Checkout DEPS file for the current chromium revision.
1937      if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
1938                                                    chromium_sha,
1939                                                    cwd=self.src_cwd):
1940        if self.UpdateDeps(revision, depot, deps_file_path):
1941          diff_command = ['diff',
1942                          '--src-prefix=src/',
1943                          '--dst-prefix=src/',
1944                          '--no-ext-diff',
1945                           bisect_utils.FILE_DEPS]
1946          diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
1947          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
1948        else:
1949          raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
1950                             chromium_sha)
1951      else:
1952        raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
1953                           chromium_sha)
1954    return (None, None)
1955
1956  def BuildCurrentRevision(self, depot, revision=None):
1957    """Builds chrome and performance_ui_tests on the current revision.
1958
1959    Returns:
1960      True if the build was successful.
1961    """
1962    if self.opts.debug_ignore_build:
1963      return True
1964    cwd = os.getcwd()
1965    os.chdir(self.src_cwd)
1966    # Fetch build archive for the given revision from the cloud storage when
1967    # the storage bucket is passed.
1968    if self.IsDownloadable(depot) and revision:
1969      deps_patch = None
1970      if depot != 'chromium':
1971        # Create a DEPS patch with new revision for dependency repository.
1972        (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
1973      if self.DownloadCurrentBuild(revision, patch=deps_patch):
1974        os.chdir(cwd)
1975        if deps_patch:
1976          # Reverts the changes to DEPS file.
1977          self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
1978                                                     revision,
1979                                                     cwd=self.src_cwd)
1980        return True
1981      return False
1982
1983    # These codes are executed when bisect bots builds binaries locally.
1984    build_success = self.builder.Build(depot, self.opts)
1985    os.chdir(cwd)
1986    return build_success
1987
1988  def RunGClientHooks(self):
1989    """Runs gclient with runhooks command.
1990
1991    Returns:
1992      True if gclient reports no errors.
1993    """
1994
1995    if self.opts.debug_ignore_build:
1996      return True
1997
1998    return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1999
2000  def TryParseHistogramValuesFromOutput(self, metric, text):
2001    """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
2002
2003    Args:
2004      metric: The metric as a list of [<trace>, <value>] strings.
2005      text: The text to parse the metric values from.
2006
2007    Returns:
2008      A list of floating point numbers found.
2009    """
2010    metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
2011
2012    text_lines = text.split('\n')
2013    values_list = []
2014
2015    for current_line in text_lines:
2016      if metric_formatted in current_line:
2017        current_line = current_line[len(metric_formatted):]
2018
2019        try:
2020          histogram_values = eval(current_line)
2021
2022          for b in histogram_values['buckets']:
2023            average_for_bucket = float(b['high'] + b['low']) * 0.5
2024            # Extends the list with N-elements with the average for that bucket.
2025            values_list.extend([average_for_bucket] * b['count'])
2026        except:
2027          pass
2028
2029    return values_list
2030
2031  def TryParseResultValuesFromOutput(self, metric, text):
2032    """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
2033
2034    Args:
2035      metric: The metric as a list of [<trace>, <value>] strings.
2036      text: The text to parse the metric values from.
2037
2038    Returns:
2039      A list of floating point numbers found.
2040    """
2041    # Format is: RESULT <graph>: <trace>= <value> <units>
2042    metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
2043
2044    # The log will be parsed looking for format:
2045    # <*>RESULT <graph_name>: <trace_name>= <value>
2046    single_result_re = re.compile(
2047        metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
2048
2049    # The log will be parsed looking for format:
2050    # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
2051    multi_results_re = re.compile(
2052        metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
2053
2054    # The log will be parsed looking for format:
2055    # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
2056    mean_stddev_re = re.compile(
2057        metric_re +
2058        '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
2059
2060    text_lines = text.split('\n')
2061    values_list = []
2062    for current_line in text_lines:
2063      # Parse the output from the performance test for the metric we're
2064      # interested in.
2065      single_result_match = single_result_re.search(current_line)
2066      multi_results_match = multi_results_re.search(current_line)
2067      mean_stddev_match = mean_stddev_re.search(current_line)
2068      if (not single_result_match is None and
2069          single_result_match.group('VALUE')):
2070        values_list += [single_result_match.group('VALUE')]
2071      elif (not multi_results_match is None and
2072            multi_results_match.group('VALUES')):
2073        metric_values = multi_results_match.group('VALUES')
2074        values_list += metric_values.split(',')
2075      elif (not mean_stddev_match is None and
2076            mean_stddev_match.group('MEAN')):
2077        values_list += [mean_stddev_match.group('MEAN')]
2078
2079    values_list = [float(v) for v in values_list if IsStringFloat(v)]
2080
2081    # If the metric is times/t, we need to sum the timings in order to get
2082    # similar regression results as the try-bots.
2083    metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
2084        ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
2085
2086    if metric in metrics_to_sum:
2087      if values_list:
2088        values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
2089
2090    return values_list
2091
2092  def ParseMetricValuesFromOutput(self, metric, text):
2093    """Parses output from performance_ui_tests and retrieves the results for
2094    a given metric.
2095
2096    Args:
2097      metric: The metric as a list of [<trace>, <value>] strings.
2098      text: The text to parse the metric values from.
2099
2100    Returns:
2101      A list of floating point numbers found.
2102    """
2103    metric_values = self.TryParseResultValuesFromOutput(metric, text)
2104
2105    if not metric_values:
2106      metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
2107
2108    return metric_values
2109
2110  def _GenerateProfileIfNecessary(self, command_args):
2111    """Checks the command line of the performance test for dependencies on
2112    profile generation, and runs tools/perf/generate_profile as necessary.
2113
2114    Args:
2115      command_args: Command line being passed to performance test, as a list.
2116
2117    Returns:
2118      False if profile generation was necessary and failed, otherwise True.
2119    """
2120
2121    if '--profile-dir' in ' '.join(command_args):
2122      # If we were using python 2.7+, we could just use the argparse
2123      # module's parse_known_args to grab --profile-dir. Since some of the
2124      # bots still run 2.6, have to grab the arguments manually.
2125      arg_dict = {}
2126      args_to_parse = ['--profile-dir', '--browser']
2127
2128      for arg_to_parse in args_to_parse:
2129        for i, current_arg in enumerate(command_args):
2130          if arg_to_parse in current_arg:
2131            current_arg_split = current_arg.split('=')
2132
2133            # Check 2 cases, --arg=<val> and --arg <val>
2134            if len(current_arg_split) == 2:
2135              arg_dict[arg_to_parse] = current_arg_split[1]
2136            elif i + 1 < len(command_args):
2137              arg_dict[arg_to_parse] = command_args[i+1]
2138
2139      path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
2140
2141      if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
2142        profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
2143        return not RunProcess(['python', path_to_generate,
2144            '--profile-type-to-generate', profile_type,
2145            '--browser', arg_dict['--browser'], '--output-dir', profile_path])
2146      return False
2147    return True
2148
2149  def _IsBisectModeUsingMetric(self):
2150    return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
2151
2152  def _IsBisectModeReturnCode(self):
2153    return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]
2154
2155  def _IsBisectModeStandardDeviation(self):
2156    return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
2157
  def RunPerformanceTestAndParseResults(
      self, command_to_run, metric, reset_on_first_run=False,
      upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance test.
          This is the result chart name and trace name, separated by slash.
      reset_on_first_run: If True, pass the flag --reset-results on first run.
      upload_on_last_run: If True, pass the flag --upload-results on last run.
      results_label: A value for the option flag --results-label.
          The arguments reset_on_first_run, upload_on_last_run and results_label
          are all ignored if the test is not a Telemetry test.

    Returns:
      (values dict, 0) if --debug_ignore_perf_test was passed.
      (values dict, 0, test output) if the test was run successfully.
      (error message, -1) if the test couldn't be run.
      (error message, -1, test output) if the test ran but there was an error.
    """
    success_code, failure_code = 0, -1

    if self.opts.debug_ignore_perf_test:
      # Debug mode: skip running the test entirely and return zeroed stats.
      fake_results = {
          'mean': 0.0,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': [0.0]
      }
      return (fake_results, success_code)

    # For Windows platform set posix=False, to parse windows paths correctly.
    # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
    # refer to http://bugs.python.org/issue1724822. By default posix=True.
    args = shlex.split(command_to_run, posix=not IsWindows())

    if not self._GenerateProfileIfNecessary(args):
      err_text = 'Failed to generate profile for performance test.'
      return (err_text, failure_code)

    # If running a Telemetry test for Chrome OS, insert the remote IP and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % CROS_TEST_KEY_PATH)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    # Repeat the test up to repeat_test_count times; the loop also bails out
    # early when max_time_minutes is exceeded (checked at the bottom).
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      current_args = copy.copy(args)
      if is_telemetry:
        if i == 0 and reset_on_first_run:
          current_args.append('--reset-results')
        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
          current_args.append('--upload-results')
        if results_label:
          current_args.append('--results-label=%s' % results_label)
      try:
        (output, return_code) = RunProcessAndRetrieveOutput(current_args,
                                                            cwd=self.src_cwd)
      except OSError, e:
        # ENOENT usually means a bad path in the supplied command line.
        if e.errno == errno.ENOENT:
          err_text  = ('Something went wrong running the performance test. '
                       'Please review the command line:\n\n')
          if 'src/' in ' '.join(args):
            err_text += ('Check that you haven\'t accidentally specified a '
                         'path with src/ in the command.\n\n')
          err_text += ' '.join(args)
          err_text += '\n'

          return (err_text, failure_code)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      if self._IsBisectModeUsingMetric():
        metric_values += self.ParseMetricValuesFromOutput(metric, output)
        # If we're bisecting on a metric (ie, changes in the mean or
        # standard deviation) and no metric values are produced, bail out.
        if not metric_values:
          break
      elif self._IsBisectModeReturnCode():
        metric_values.append(return_code)

      elapsed_minutes = (time.time() - start_time) / 60.0
      if elapsed_minutes >= self.opts.max_time_minutes:
        break

    if len(metric_values) == 0:
      err_text = 'Metric %s was not found in the test output.' % metric
      # TODO(qyearsley): Consider also getting and displaying a list of metrics
      # that were found in the output here.
      return (err_text, failure_code, output_of_all_runs)

    # If we're bisecting on return codes, we're really just looking for zero vs
    # non-zero.
    if self._IsBisectModeReturnCode():
      # If any of the return codes is non-zero, output 1.
      overall_return_code = 0 if (
          all(current_value == 0 for current_value in metric_values)) else 1

      values = {
        'mean': overall_return_code,
        'std_err': 0.0,
        'std_dev': 0.0,
        'values': metric_values,
      }

      print 'Results of performance test: Command returned with %d' % (
          overall_return_code)
      print
    else:
      # Need to get the average value if there were multiple values.
      truncated_mean = CalculateTruncatedMean(metric_values,
          self.opts.truncate_percent)
      standard_err = CalculateStandardError(metric_values)
      standard_dev = CalculateStandardDeviation(metric_values)

      # In std-dev bisect mode the value under bisection is the spread itself.
      if self._IsBisectModeStandardDeviation():
        metric_values = [standard_dev]

      values = {
        'mean': truncated_mean,
        'std_err': standard_err,
        'std_dev': standard_dev,
        'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
    return (values, success_code, output_of_all_runs)
2297
2298  def FindAllRevisionsToSync(self, revision, depot):
2299    """Finds all dependant revisions and depots that need to be synced for a
2300    given revision. This is only useful in the git workflow, as an svn depot
2301    may be split into multiple mirrors.
2302
2303    ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
2304    skia/include. To sync skia/src properly, one has to find the proper
2305    revisions in skia/gyp and skia/include.
2306
2307    Args:
2308      revision: The revision to sync to.
2309      depot: The depot in use at the moment (probably skia).
2310
2311    Returns:
2312      A list of [depot, revision] pairs that need to be synced.
2313    """
2314    revisions_to_sync = [[depot, revision]]
2315
2316    is_base = ((depot == 'chromium') or (depot == 'cros') or
2317        (depot == 'android-chrome'))
2318
2319    # Some SVN depots were split into multiple git depots, so we need to
2320    # figure out for each mirror which git revision to grab. There's no
2321    # guarantee that the SVN revision will exist for each of the dependant
2322    # depots, so we have to grep the git logs and grab the next earlier one.
2323    if not is_base and\
2324       DEPOT_DEPS_NAME[depot]['depends'] and\
2325       self.source_control.IsGit():
2326      svn_rev = self.source_control.SVNFindRev(revision)
2327
2328      for d in DEPOT_DEPS_NAME[depot]['depends']:
2329        self.ChangeToDepotWorkingDirectory(d)
2330
2331        dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
2332
2333        if dependant_rev:
2334          revisions_to_sync.append([d, dependant_rev])
2335
2336      num_resolved = len(revisions_to_sync)
2337      num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
2338
2339      self.ChangeToDepotWorkingDirectory(depot)
2340
2341      if not ((num_resolved - 1) == num_needed):
2342        return None
2343
2344    return revisions_to_sync
2345
2346  def PerformPreBuildCleanup(self):
2347    """Performs necessary cleanup between runs."""
2348    print 'Cleaning up between runs.'
2349    print
2350
2351    # Having these pyc files around between runs can confuse the
2352    # perf tests and cause them to crash.
2353    for (path, _, files) in os.walk(self.src_cwd):
2354      for cur_file in files:
2355        if cur_file.endswith('.pyc'):
2356          path_to_file = os.path.join(path, cur_file)
2357          os.remove(path_to_file)
2358
2359  def PerformWebkitDirectoryCleanup(self, revision):
2360    """If the script is switching between Blink and WebKit during bisect,
2361    its faster to just delete the directory rather than leave it up to git
2362    to sync.
2363
2364    Returns:
2365      True if successful.
2366    """
2367    if not self.source_control.CheckoutFileAtRevision(
2368        bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
2369      return False
2370
2371    cwd = os.getcwd()
2372    os.chdir(self.src_cwd)
2373
2374    is_blink = bisect_utils.IsDepsFileBlink()
2375
2376    os.chdir(cwd)
2377
2378    if not self.source_control.RevertFileToHead(
2379        bisect_utils.FILE_DEPS_GIT):
2380      return False
2381
2382    if self.was_blink != is_blink:
2383      self.was_blink = is_blink
2384      # Removes third_party/Webkit directory.
2385      return bisect_utils.RemoveThirdPartyDirectory('Webkit')
2386    return True
2387
2388  def PerformCrosChrootCleanup(self):
2389    """Deletes the chroot.
2390
2391    Returns:
2392        True if successful.
2393    """
2394    cwd = os.getcwd()
2395    self.ChangeToDepotWorkingDirectory('cros')
2396    cmd = [CROS_SDK_PATH, '--delete']
2397    return_code = RunProcess(cmd)
2398    os.chdir(cwd)
2399    return not return_code
2400
2401  def CreateCrosChroot(self):
2402    """Creates a new chroot.
2403
2404    Returns:
2405        True if successful.
2406    """
2407    cwd = os.getcwd()
2408    self.ChangeToDepotWorkingDirectory('cros')
2409    cmd = [CROS_SDK_PATH, '--create']
2410    return_code = RunProcess(cmd)
2411    os.chdir(cwd)
2412    return not return_code
2413
2414  def PerformPreSyncCleanup(self, revision, depot):
2415    """Performs any necessary cleanup before syncing.
2416
2417    Returns:
2418      True if successful.
2419    """
2420    if depot == 'chromium' or depot == 'android-chrome':
2421      # Removes third_party/libjingle. At some point, libjingle was causing
2422      # issues syncing when using the git workflow (crbug.com/266324).
2423      os.chdir(self.src_cwd)
2424      if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
2425        return False
2426      # Removes third_party/skia. At some point, skia was causing
2427      #  issues syncing when using the git workflow (crbug.com/377951).
2428      if not bisect_utils.RemoveThirdPartyDirectory('skia'):
2429        return False
2430      if depot == 'chromium':
2431        # The fast webkit cleanup doesn't work for android_chrome
2432        # The switch from Webkit to Blink that this deals with now happened
2433        # quite a long time ago so this is unlikely to be a problem.
2434        return self.PerformWebkitDirectoryCleanup(revision)
2435    elif depot == 'cros':
2436      return self.PerformCrosChrootCleanup()
2437    return True
2438
2439  def RunPostSync(self, depot):
2440    """Performs any work after syncing.
2441
2442    Returns:
2443      True if successful.
2444    """
2445    if self.opts.target_platform == 'android':
2446      if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
2447          path_to_src=self.src_cwd):
2448        return False
2449
2450    if depot == 'cros':
2451      return self.CreateCrosChroot()
2452    else:
2453      return self.RunGClientHooks()
2454    return True
2455
2456  def ShouldSkipRevision(self, depot, revision):
2457    """Some commits can be safely skipped (such as a DEPS roll), since the tool
2458    is git based those changes would have no effect.
2459
2460    Args:
2461      depot: The depot being bisected.
2462      revision: Current revision we're synced to.
2463
2464    Returns:
2465      True if we should skip building/testing this revision.
2466    """
2467    if depot == 'chromium':
2468      if self.source_control.IsGit():
2469        cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
2470        output = CheckRunGit(cmd)
2471
2472        files = output.splitlines()
2473
2474        if len(files) == 1 and files[0] == 'DEPS':
2475          return True
2476
2477    return False
2478
2479  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
2480      skippable=False):
2481    """Performs a full sync/build/run of the specified revision.
2482
2483    Args:
2484      revision: The revision to sync to.
2485      depot: The depot that's being used at the moment (src, webkit, etc.)
2486      command_to_run: The command to execute the performance test.
2487      metric: The performance metric being tested.
2488
2489    Returns:
2490      On success, a tuple containing the results of the performance test.
2491      Otherwise, a tuple with the error message.
2492    """
2493    sync_client = None
2494    if depot == 'chromium' or depot == 'android-chrome':
2495      sync_client = 'gclient'
2496    elif depot == 'cros':
2497      sync_client = 'repo'
2498
2499    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
2500
2501    if not revisions_to_sync:
2502      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)
2503
2504    if not self.PerformPreSyncCleanup(revision, depot):
2505      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
2506
2507    success = True
2508
2509    if not self.opts.debug_ignore_sync:
2510      for r in revisions_to_sync:
2511        self.ChangeToDepotWorkingDirectory(r[0])
2512
2513        if sync_client:
2514          self.PerformPreBuildCleanup()
2515
2516        # If you're using gclient to sync, you need to specify the depot you
2517        # want so that all the dependencies sync properly as well.
2518        # ie. gclient sync src@<SHA1>
2519        current_revision = r[1]
2520        if sync_client == 'gclient':
2521          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
2522              current_revision)
2523        if not self.source_control.SyncToRevision(current_revision,
2524            sync_client):
2525          success = False
2526
2527          break
2528
2529    if success:
2530      success = self.RunPostSync(depot)
2531      if success:
2532        if skippable and self.ShouldSkipRevision(depot, revision):
2533          return ('Skipped revision: [%s]' % str(revision),
2534              BUILD_RESULT_SKIPPED)
2535
2536        start_build_time = time.time()
2537        if self.BuildCurrentRevision(depot, revision):
2538          after_build_time = time.time()
2539          results = self.RunPerformanceTestAndParseResults(command_to_run,
2540                                                           metric)
2541          # Restore build output directory once the tests are done, to avoid
2542          # any descrepancy.
2543          if self.IsDownloadable(depot) and revision:
2544            self.BackupOrRestoreOutputdirectory(restore=True)
2545
2546          if results[1] == 0:
2547            external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
2548                depot, revision)
2549
2550            if not external_revisions is None:
2551              return (results[0], results[1], external_revisions,
2552                  time.time() - after_build_time, after_build_time -
2553                  start_build_time)
2554            else:
2555              return ('Failed to parse DEPS file for external revisions.',
2556                  BUILD_RESULT_FAIL)
2557          else:
2558            return results
2559        else:
2560          return ('Failed to build revision: [%s]' % (str(revision, )),
2561              BUILD_RESULT_FAIL)
2562      else:
2563        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
2564    else:
2565      return ('Failed to sync revision: [%s]' % (str(revision, )),
2566          BUILD_RESULT_FAIL)
2567
2568  def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
2569    """Given known good and bad values, decide if the current_value passed
2570    or failed.
2571
2572    Args:
2573      current_value: The value of the metric being checked.
2574      known_bad_value: The reference value for a "failed" run.
2575      known_good_value: The reference value for a "passed" run.
2576
2577    Returns:
2578      True if the current_value is closer to the known_good_value than the
2579      known_bad_value.
2580    """
2581    if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2582      dist_to_good_value = abs(current_value['std_dev'] -
2583          known_good_value['std_dev'])
2584      dist_to_bad_value = abs(current_value['std_dev'] -
2585          known_bad_value['std_dev'])
2586    else:
2587      dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2588      dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
2589
2590    return dist_to_good_value < dist_to_bad_value
2591
2592  def _GetDepotDirectory(self, depot_name):
2593    if depot_name == 'chromium':
2594      return self.src_cwd
2595    elif depot_name == 'cros':
2596      return self.cros_cwd
2597    elif depot_name in DEPOT_NAMES:
2598      return self.depot_cwd[depot_name]
2599    else:
2600      assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2601                    ' was added without proper support?' % depot_name
2602
2603  def ChangeToDepotWorkingDirectory(self, depot_name):
2604    """Given a depot, changes to the appropriate working directory.
2605
2606    Args:
2607      depot_name: The name of the depot (see DEPOT_NAMES).
2608    """
2609    os.chdir(self._GetDepotDirectory(depot_name))
2610
2611  def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2612    r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2613        search_forward=True)
2614    r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2615        search_forward=False)
2616    min_revision_data['external']['v8_bleeding_edge'] = r1
2617    max_revision_data['external']['v8_bleeding_edge'] = r2
2618
2619    if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2620            min_revision_data['revision']) or
2621        not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2622            max_revision_data['revision'])):
2623      self.warnings.append('Trunk revisions in V8 did not map directly to '
2624          'bleeding_edge. Attempted to expand the range to find V8 rolls which '
2625          'did map directly to bleeding_edge revisions, but results might not '
2626          'be valid.')
2627
2628  def _FindNextDepotToBisect(self, current_depot, current_revision,
2629      min_revision_data, max_revision_data):
2630    """Given the state of the bisect, decides which depot the script should
2631    dive into next (if any).
2632
2633    Args:
2634      current_depot: Current depot being bisected.
2635      current_revision: Current revision synced to.
2636      min_revision_data: Data about the earliest revision in the bisect range.
2637      max_revision_data: Data about the latest revision in the bisect range.
2638
2639    Returns:
2640      The depot to bisect next, or None.
2641    """
2642    external_depot = None
2643    for next_depot in DEPOT_NAMES:
2644      if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
2645        if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
2646          continue
2647
2648      if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
2649          min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
2650        continue
2651
2652      if current_depot == 'v8':
2653        # We grab the bleeding_edge info here rather than earlier because we
2654        # finally have the revision range. From that we can search forwards and
2655        # backwards to try to match trunk revisions to bleeding_edge.
2656        self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
2657
2658      if (min_revision_data['external'].get(next_depot) ==
2659          max_revision_data['external'].get(next_depot)):
2660        continue
2661
2662      if (min_revision_data['external'].get(next_depot) and
2663          max_revision_data['external'].get(next_depot)):
2664        external_depot = next_depot
2665        break
2666
2667    return external_depot
2668
2669  def PrepareToBisectOnDepot(self,
2670                             current_depot,
2671                             end_revision,
2672                             start_revision,
2673                             previous_depot,
2674                             previous_revision):
2675    """Changes to the appropriate directory and gathers a list of revisions
2676    to bisect between |start_revision| and |end_revision|.
2677
2678    Args:
2679      current_depot: The depot we want to bisect.
2680      end_revision: End of the revision range.
2681      start_revision: Start of the revision range.
2682      previous_depot: The depot we were previously bisecting.
2683      previous_revision: The last revision we synced to on |previous_depot|.
2684
2685    Returns:
2686      A list containing the revisions between |start_revision| and
2687      |end_revision| inclusive.
2688    """
2689    # Change into working directory of external library to run
2690    # subsequent commands.
2691    self.ChangeToDepotWorkingDirectory(current_depot)
2692
2693    # V8 (and possibly others) is merged in periodically. Bisecting
2694    # this directory directly won't give much good info.
2695    if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
2696      config_path = os.path.join(self.src_cwd, '..')
2697      if bisect_utils.RunGClientAndCreateConfig(self.opts,
2698          DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
2699        return []
2700      if bisect_utils.RunGClient(
2701          ['sync', '--revision', previous_revision], cwd=self.src_cwd):
2702        return []
2703
2704    if current_depot == 'v8_bleeding_edge':
2705      self.ChangeToDepotWorkingDirectory('chromium')
2706
2707      shutil.move('v8', 'v8.bak')
2708      shutil.move('v8_bleeding_edge', 'v8')
2709
2710      self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
2711      self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
2712
2713      self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
2714      self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
2715
2716      self.ChangeToDepotWorkingDirectory(current_depot)
2717
2718    depot_revision_list = self.GetRevisionList(current_depot,
2719                                               end_revision,
2720                                               start_revision)
2721
2722    self.ChangeToDepotWorkingDirectory('chromium')
2723
2724    return depot_revision_list
2725
2726  def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2727    """Gathers reference values by running the performance tests on the
2728    known good and bad revisions.
2729
2730    Args:
2731      good_rev: The last known good revision where the performance regression
2732        has not occurred yet.
2733      bad_rev: A revision where the performance regression has already occurred.
2734      cmd: The command to execute the performance test.
2735      metric: The metric being tested for regression.
2736
2737    Returns:
2738      A tuple with the results of building and running each revision.
2739    """
2740    bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
2741                                                   target_depot,
2742                                                   cmd,
2743                                                   metric)
2744
2745    good_run_results = None
2746
2747    if not bad_run_results[1]:
2748      good_run_results = self.SyncBuildAndRunRevision(good_rev,
2749                                                      target_depot,
2750                                                      cmd,
2751                                                      metric)
2752
2753    return (bad_run_results, good_run_results)
2754
2755  def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
2756    """Adds new revisions to the revision_data dict and initializes them.
2757
2758    Args:
2759      revisions: List of revisions to add.
2760      depot: Depot that's currently in use (src, webkit, etc...)
2761      sort: Sorting key for displaying revisions.
2762      revision_data: A dict to add the new revisions into. Existing revisions
2763        will have their sort keys offset.
2764    """
2765
2766    num_depot_revisions = len(revisions)
2767
2768    for _, v in revision_data.iteritems():
2769      if v['sort'] > sort:
2770        v['sort'] += num_depot_revisions
2771
2772    for i in xrange(num_depot_revisions):
2773      r = revisions[i]
2774
2775      revision_data[r] = {'revision' : r,
2776                          'depot' : depot,
2777                          'value' : None,
2778                          'perf_time' : 0,
2779                          'build_time' : 0,
2780                          'passed' : '?',
2781                          'sort' : i + sort + 1}
2782
2783  def PrintRevisionsToBisectMessage(self, revision_list, depot):
2784    if self.opts.output_buildbot_annotations:
2785      step_name = 'Bisection Range: [%s - %s]' % (
2786          revision_list[len(revision_list)-1], revision_list[0])
2787      bisect_utils.OutputAnnotationStepStart(step_name)
2788
2789    print
2790    print 'Revisions to bisect on [%s]:' % depot
2791    for revision_id in revision_list:
2792      print '  -> %s' % (revision_id, )
2793    print
2794
2795    if self.opts.output_buildbot_annotations:
2796      bisect_utils.OutputAnnotationStepClosed()
2797
  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
    """Checks to see if changes to DEPS file occurred, and that the revision
    range also includes the change to .DEPS.git. If it doesn't, attempts to
    expand the revision range to include it.

    Args:
        bad_revision: First known bad revision.
        good_revision: Last known good revision.

    Returns:
        A tuple with the new bad and good revisions.
    """
    # Only applicable to git-based chromium checkouts; other configurations
    # are returned unchanged.
    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
      changes_to_deps = self.source_control.QueryFileRevisionHistory(
          'DEPS', good_revision, bad_revision)

      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

        # A count mismatch means some DEPS change has no corresponding
        # .DEPS.git change inside the current range.
        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = CheckRunGit(cmd)
          commit_time = int(output)

          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = ['log', '--format=%H', '-1',
              '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
              'origin/master', bisect_utils.FILE_DEPS_GIT]
          output = CheckRunGit(cmd)
          output = output.strip()
          if output:
            self.warnings.append('Detected change to DEPS and modified '
                'revision range to include change to .DEPS.git')
            return (output, good_revision)
          else:
            self.warnings.append('Detected change to DEPS but couldn\'t find '
                'matching change to .DEPS.git')
    return (bad_revision, good_revision)
2842
2843  def CheckIfRevisionsInProperOrder(self,
2844                                    target_depot,
2845                                    good_revision,
2846                                    bad_revision):
2847    """Checks that |good_revision| is an earlier revision than |bad_revision|.
2848
2849    Args:
2850        good_revision: Number/tag of the known good revision.
2851        bad_revision: Number/tag of the known bad revision.
2852
2853    Returns:
2854        True if the revisions are in the proper order (good earlier than bad).
2855    """
2856    if self.source_control.IsGit() and target_depot != 'cros':
2857      cmd = ['log', '--format=%ct', '-1', good_revision]
2858      cwd = self._GetDepotDirectory(target_depot)
2859
2860      output = CheckRunGit(cmd, cwd=cwd)
2861      good_commit_time = int(output)
2862
2863      cmd = ['log', '--format=%ct', '-1', bad_revision]
2864      output = CheckRunGit(cmd, cwd=cwd)
2865      bad_commit_time = int(output)
2866
2867      return good_commit_time <= bad_commit_time
2868    else:
2869      # Cros/svn use integers
2870      return int(good_revision) <= int(bad_revision)
2871
2872  def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2873    """Given known good and bad revisions, run a binary search on all
2874    intermediate revisions to determine the CL where the performance regression
2875    occurred.
2876
2877    Args:
2878        command_to_run: Specify the command to execute the performance test.
2879        good_revision: Number/tag of the known good revision.
2880        bad_revision: Number/tag of the known bad revision.
2881        metric: The performance metric to monitor.
2882
2883    Returns:
2884        A dict with 2 members, 'revision_data' and 'error'. On success,
2885        'revision_data' will contain a dict mapping revision ids to
2886        data about that revision. Each piece of revision data consists of a
2887        dict with the following keys:
2888
2889        'passed': Represents whether the performance test was successful at
2890            that revision. Possible values include: 1 (passed), 0 (failed),
2891            '?' (skipped), 'F' (build failed).
2892        'depot': The depot that this revision is from (ie. WebKit)
2893        'external': If the revision is a 'src' revision, 'external' contains
2894            the revisions of each of the external libraries.
2895        'sort': A sort value for sorting the dict in order of commits.
2896
2897        For example:
2898        {
2899          'error':None,
2900          'revision_data':
2901          {
2902            'CL #1':
2903            {
2904              'passed':False,
2905              'depot':'chromium',
2906              'external':None,
2907              'sort':0
2908            }
2909          }
2910        }
2911
2912        If an error occurred, the 'error' field will contain the message and
2913        'revision_data' will be empty.
2914    """
2915    results = {'revision_data' : {},
2916               'error' : None}
2917
2918    # Choose depot to bisect first
2919    target_depot = 'chromium'
2920    if self.opts.target_platform == 'cros':
2921      target_depot = 'cros'
2922    elif self.opts.target_platform == 'android-chrome':
2923      target_depot = 'android-chrome'
2924
2925    cwd = os.getcwd()
2926    self.ChangeToDepotWorkingDirectory(target_depot)
2927
2928    # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2929    bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2930                                                         target_depot, 100)
2931    good_revision = self.source_control.ResolveToRevision(good_revision_in,
2932                                                          target_depot, -100)
2933
2934    os.chdir(cwd)
2935
2936
2937    if bad_revision is None:
2938      results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2939      return results
2940
2941    if good_revision is None:
2942      results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
2943      return results
2944
2945    # Check that they didn't accidentally swap good and bad revisions.
2946    if not self.CheckIfRevisionsInProperOrder(
2947        target_depot, good_revision, bad_revision):
2948      results['error'] = 'bad_revision < good_revision, did you swap these '\
2949          'by mistake?'
2950      return results
2951
2952    (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2953        bad_revision, good_revision)
2954
2955    if self.opts.output_buildbot_annotations:
2956      bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2957
2958    print 'Gathering revision range for bisection.'
2959    # Retrieve a list of revisions to do bisection on.
2960    src_revision_list = self.GetRevisionList(target_depot,
2961                                             bad_revision,
2962                                             good_revision)
2963
2964    if self.opts.output_buildbot_annotations:
2965      bisect_utils.OutputAnnotationStepClosed()
2966
2967    if src_revision_list:
2968      # revision_data will store information about a revision such as the
2969      # depot it came from, the webkit/V8 revision at that time,
2970      # performance timing, build state, etc...
2971      revision_data = results['revision_data']
2972
2973      # revision_list is the list we're binary searching through at the moment.
2974      revision_list = []
2975
2976      sort_key_ids = 0
2977
2978      for current_revision_id in src_revision_list:
2979        sort_key_ids += 1
2980
2981        revision_data[current_revision_id] = {'value' : None,
2982                                              'passed' : '?',
2983                                              'depot' : target_depot,
2984                                              'external' : None,
2985                                              'perf_time' : 0,
2986                                              'build_time' : 0,
2987                                              'sort' : sort_key_ids}
2988        revision_list.append(current_revision_id)
2989
2990      min_revision = 0
2991      max_revision = len(revision_list) - 1
2992
2993      self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2994
2995      if self.opts.output_buildbot_annotations:
2996        bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2997
2998      print 'Gathering reference values for bisection.'
2999
3000      # Perform the performance tests on the good and bad revisions, to get
3001      # reference values.
3002      (bad_results, good_results) = self.GatherReferenceValues(good_revision,
3003                                                               bad_revision,
3004                                                               command_to_run,
3005                                                               metric,
3006                                                               target_depot)
3007
3008      if self.opts.output_buildbot_annotations:
3009        bisect_utils.OutputAnnotationStepClosed()
3010
3011      if bad_results[1]:
3012        results['error'] = ('An error occurred while building and running '
3013            'the \'bad\' reference value. The bisect cannot continue without '
3014            'a working \'bad\' revision to start from.\n\nError: %s' %
3015                bad_results[0])
3016        return results
3017
3018      if good_results[1]:
3019        results['error'] = ('An error occurred while building and running '
3020            'the \'good\' reference value. The bisect cannot continue without '
3021            'a working \'good\' revision to start from.\n\nError: %s' %
3022                good_results[0])
3023        return results
3024
3025
3026      # We need these reference values to determine if later runs should be
3027      # classified as pass or fail.
3028      known_bad_value = bad_results[0]
3029      known_good_value = good_results[0]
3030
3031      # Can just mark the good and bad revisions explicitly here since we
3032      # already know the results.
3033      bad_revision_data = revision_data[revision_list[0]]
3034      bad_revision_data['external'] = bad_results[2]
3035      bad_revision_data['perf_time'] = bad_results[3]
3036      bad_revision_data['build_time'] = bad_results[4]
3037      bad_revision_data['passed'] = False
3038      bad_revision_data['value'] = known_bad_value
3039
3040      good_revision_data = revision_data[revision_list[max_revision]]
3041      good_revision_data['external'] = good_results[2]
3042      good_revision_data['perf_time'] = good_results[3]
3043      good_revision_data['build_time'] = good_results[4]
3044      good_revision_data['passed'] = True
3045      good_revision_data['value'] = known_good_value
3046
3047      next_revision_depot = target_depot
3048
3049      while True:
3050        if not revision_list:
3051          break
3052
3053        min_revision_data = revision_data[revision_list[min_revision]]
3054        max_revision_data = revision_data[revision_list[max_revision]]
3055
3056        if max_revision - min_revision <= 1:
3057          current_depot = min_revision_data['depot']
3058          if min_revision_data['passed'] == '?':
3059            next_revision_index = min_revision
3060          elif max_revision_data['passed'] == '?':
3061            next_revision_index = max_revision
3062          elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
3063            previous_revision = revision_list[min_revision]
3064            # If there were changes to any of the external libraries we track,
3065            # should bisect the changes there as well.
3066            external_depot = self._FindNextDepotToBisect(current_depot,
3067                previous_revision, min_revision_data, max_revision_data)
3068
3069            # If there was no change in any of the external depots, the search
3070            # is over.
3071            if not external_depot:
3072              if current_depot == 'v8':
3073                self.warnings.append('Unfortunately, V8 bisection couldn\'t '
3074                    'continue any further. The script can only bisect into '
3075                    'V8\'s bleeding_edge repository if both the current and '
3076                    'previous revisions in trunk map directly to revisions in '
3077                    'bleeding_edge.')
3078              break
3079
3080            earliest_revision = max_revision_data['external'][external_depot]
3081            latest_revision = min_revision_data['external'][external_depot]
3082
3083            new_revision_list = self.PrepareToBisectOnDepot(external_depot,
3084                                                            latest_revision,
3085                                                            earliest_revision,
3086                                                            next_revision_depot,
3087                                                            previous_revision)
3088
3089            if not new_revision_list:
3090              results['error'] = 'An error occurred attempting to retrieve'\
3091                                 ' revision range: [%s..%s]' % \
3092                                 (earliest_revision, latest_revision)
3093              return results
3094
3095            self.AddRevisionsIntoRevisionData(new_revision_list,
3096                                              external_depot,
3097                                              min_revision_data['sort'],
3098                                              revision_data)
3099
3100            # Reset the bisection and perform it on the newly inserted
3101            # changelists.
3102            revision_list = new_revision_list
3103            min_revision = 0
3104            max_revision = len(revision_list) - 1
3105            sort_key_ids += len(revision_list)
3106
3107            print 'Regression in metric:%s appears to be the result of changes'\
3108                  ' in [%s].' % (metric, external_depot)
3109
3110            self.PrintRevisionsToBisectMessage(revision_list, external_depot)
3111
3112            continue
3113          else:
3114            break
3115        else:
3116          next_revision_index = int((max_revision - min_revision) / 2) +\
3117                                min_revision
3118
3119        next_revision_id = revision_list[next_revision_index]
3120        next_revision_data = revision_data[next_revision_id]
3121        next_revision_depot = next_revision_data['depot']
3122
3123        self.ChangeToDepotWorkingDirectory(next_revision_depot)
3124
3125        if self.opts.output_buildbot_annotations:
3126          step_name = 'Working on [%s]' % next_revision_id
3127          bisect_utils.OutputAnnotationStepStart(step_name)
3128
3129        print 'Working on revision: [%s]' % next_revision_id
3130
3131        run_results = self.SyncBuildAndRunRevision(next_revision_id,
3132                                                   next_revision_depot,
3133                                                   command_to_run,
3134                                                   metric, skippable=True)
3135
3136        # If the build is successful, check whether or not the metric
3137        # had regressed.
3138        if not run_results[1]:
3139          if len(run_results) > 2:
3140            next_revision_data['external'] = run_results[2]
3141            next_revision_data['perf_time'] = run_results[3]
3142            next_revision_data['build_time'] = run_results[4]
3143
3144          passed_regression = self._CheckIfRunPassed(run_results[0],
3145                                                     known_good_value,
3146                                                     known_bad_value)
3147
3148          next_revision_data['passed'] = passed_regression
3149          next_revision_data['value'] = run_results[0]
3150
3151          if passed_regression:
3152            max_revision = next_revision_index
3153          else:
3154            min_revision = next_revision_index
3155        else:
3156          if run_results[1] == BUILD_RESULT_SKIPPED:
3157            next_revision_data['passed'] = 'Skipped'
3158          elif run_results[1] == BUILD_RESULT_FAIL:
3159            next_revision_data['passed'] = 'Build Failed'
3160
3161          print run_results[0]
3162
3163          # If the build is broken, remove it and redo search.
3164          revision_list.pop(next_revision_index)
3165
3166          max_revision -= 1
3167
3168        if self.opts.output_buildbot_annotations:
3169          self._PrintPartialResults(results)
3170          bisect_utils.OutputAnnotationStepClosed()
3171    else:
3172      # Weren't able to sync and retrieve the revision range.
3173      results['error'] = 'An error occurred attempting to retrieve revision '\
3174                         'range: [%s..%s]' % (good_revision, bad_revision)
3175
3176    return results
3177
3178  def _PrintPartialResults(self, results_dict):
3179    revision_data = results_dict['revision_data']
3180    revision_data_sorted = sorted(revision_data.iteritems(),
3181                                  key = lambda x: x[1]['sort'])
3182    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3183
3184    self._PrintTestedCommitsTable(revision_data_sorted,
3185                                  results_dict['first_working_revision'],
3186                                  results_dict['last_broken_revision'],
3187                                  100, final_step=False)
3188
3189  def _PrintConfidence(self, results_dict):
3190    # The perf dashboard specifically looks for the string
3191    # "Confidence in Bisection Results: 100%" to decide whether or not
3192    # to cc the author(s). If you change this, please update the perf
3193    # dashboard as well.
3194    print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
3195
3196  def _PrintBanner(self, results_dict):
3197    print
3198    print " __o_\___          Aw Snap! We hit a speed bump!"
3199    print "=-O----O-'__.~.___________________________________"
3200    print
3201    if self._IsBisectModeReturnCode():
3202      print ('Bisect reproduced a change in return codes while running the '
3203          'performance test.')
3204    else:
3205      print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
3206          '%s metric.' % (results_dict['regression_size'],
3207          results_dict['regression_std_err'], '/'.join(self.opts.metric)))
3208    self._PrintConfidence(results_dict)
3209
3210  def _PrintFailedBanner(self, results_dict):
3211    print
3212    if self._IsBisectModeReturnCode():
3213      print 'Bisect could not reproduce a change in the return code.'
3214    else:
3215      print ('Bisect could not reproduce a change in the '
3216          '%s metric.' % '/'.join(self.opts.metric))
3217    print
3218
3219  def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
3220    info = self.source_control.QueryRevisionInfo(cl,
3221        self._GetDepotDirectory(depot))
3222    if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
3223      try:
3224        # Format is "git-svn-id: svn://....@123456 <other data>"
3225        svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
3226        svn_revision = svn_line[0].split('@')
3227        svn_revision = svn_revision[1].split(' ')[0]
3228        return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
3229      except IndexError:
3230        return ''
3231    return ''
3232
  def _PrintRevisionInfo(self, cl, info, depot=None):
    """Prints the details (subject, author, link, date) of one revision.

    Args:
      cl: The commit hash or revision being reported.
      info: A dict with 'subject', 'author', 'email', 'body' and 'date'
          keys for the commit, as returned by QueryRevisionInfo.
      depot: Optional depot name, used to build a viewvc link for the
          commit.
    """
    # The perf dashboard specifically looks for the string
    # "Author  : " to parse out who to cc on a bug. If you change the
    # formatting here, please update the perf dashboard as well.
    print
    print 'Subject : %s' % info['subject']
    print 'Author  : %s' % info['author']
    # Only show the email separately when it doesn't simply repeat the
    # author field.
    if not info['email'].startswith(info['author']):
      print 'Email   : %s' % info['email']
    commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
    if commit_link:
      print 'Link    : %s' % commit_link
    else:
      # No link could be built; dump the raw commit body so the revision
      # can still be identified manually.
      print
      print 'Failed to parse svn revision from body:'
      print
      print info['body']
      print
    print 'Commit  : %s' % cl
    print 'Date    : %s' % info['date']
3253
3254  def _PrintTableRow(self, column_widths, row_data):
3255    assert len(column_widths) == len(row_data)
3256
3257    text = ''
3258    for i in xrange(len(column_widths)):
3259      current_row_data = row_data[i].center(column_widths[i], ' ')
3260      text += ('%%%ds' % column_widths[i]) % current_row_data
3261    print text
3262
3263  def _PrintTestedCommitsHeader(self):
3264    if self.opts.bisect_mode == BISECT_MODE_MEAN:
3265      self._PrintTableRow(
3266          [20, 70, 14, 12, 13],
3267          ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3268    elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3269      self._PrintTableRow(
3270          [20, 70, 14, 12, 13],
3271          ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3272    elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3273      self._PrintTableRow(
3274          [20, 70, 14, 13],
3275          ['Depot', 'Commit SHA', 'Return Code', 'State'])
3276    else:
3277      assert False, "Invalid bisect_mode specified."
3278      print '  %20s  %70s  %14s %13s' % ('Depot'.center(20, ' '),
3279          'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3280           'State'.center(13, ' '))
3281
3282  def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
3283    if self.opts.bisect_mode == BISECT_MODE_MEAN:
3284      std_error = '+-%.02f' % current_data['value']['std_err']
3285      mean = '%.02f' % current_data['value']['mean']
3286      self._PrintTableRow(
3287          [20, 70, 12, 14, 13],
3288          [current_data['depot'], cl_link, mean, std_error, state_str])
3289    elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3290      std_error = '+-%.02f' % current_data['value']['std_err']
3291      mean = '%.02f' % current_data['value']['mean']
3292      self._PrintTableRow(
3293          [20, 70, 12, 14, 13],
3294          [current_data['depot'], cl_link, std_error, mean, state_str])
3295    elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3296      mean = '%d' % current_data['value']['mean']
3297      self._PrintTableRow(
3298          [20, 70, 14, 13],
3299          [current_data['depot'], cl_link, mean, state_str])
3300
  def _PrintTestedCommitsTable(self, revision_data_sorted,
      first_working_revision, last_broken_revision, confidence,
      final_step=True):
    """Prints a table of every commit tested during the bisect.

    Args:
      revision_data_sorted: (revision, data) pairs sorted in commit order.
      first_working_revision: The first revision that passed the test.
      last_broken_revision: The last revision that failed the test.
      confidence: Confidence percentage for the result; when falsy, the
          Bad/Good/Suspected labels are suppressed.
      final_step: True when printing the final summary, False for an
          intermediate progress report.
    """
    print
    if final_step:
      print 'Tested commits:'
    else:
      print 'Partial results:'
    self._PrintTestedCommitsHeader()
    # 'state' tracks which part of the range a row falls in: 0 = broken
    # side, 1 = between last broken and first working (the suspected
    # range), 2 = working side. It is bumped when the boundary revisions
    # are reached below.
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print

        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'

        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')

        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
            current_data['depot'])
        if not cl_link:
          cl_link = current_id
        self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
3340
3341  def _PrintReproSteps(self):
3342    print
3343    print 'To reproduce locally:'
3344    print '$ ' + self.opts.command
3345    if bisect_utils.IsTelemetryCommand(self.opts.command):
3346      print
3347      print 'Also consider passing --profiler=list to see available profilers.'
3348
3349  def _PrintOtherRegressions(self, other_regressions, revision_data):
3350    print
3351    print 'Other regressions may have occurred:'
3352    print '  %8s  %70s  %10s' % ('Depot'.center(8, ' '),
3353        'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3354    for regression in other_regressions:
3355      current_id, previous_id, confidence = regression
3356      current_data = revision_data[current_id]
3357      previous_data = revision_data[previous_id]
3358
3359      current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3360          current_data['depot'])
3361      previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3362          previous_data['depot'])
3363
3364      # If we can't map it to a viewable URL, at least show the original hash.
3365      if not current_link:
3366        current_link = current_id
3367      if not previous_link:
3368        previous_link = previous_id
3369
3370      print '  %8s  %70s %s' % (
3371          current_data['depot'], current_link,
3372          ('%d%%' % confidence).center(10, ' '))
3373      print '  %8s  %70s' % (
3374          previous_data['depot'], previous_link)
3375      print
3376
3377  def _PrintStepTime(self, revision_data_sorted):
3378    step_perf_time_avg = 0.0
3379    step_build_time_avg = 0.0
3380    step_count = 0.0
3381    for _, current_data in revision_data_sorted:
3382      if current_data['value']:
3383        step_perf_time_avg += current_data['perf_time']
3384        step_build_time_avg += current_data['build_time']
3385        step_count += 1
3386    if step_count:
3387      step_perf_time_avg = step_perf_time_avg / step_count
3388      step_build_time_avg = step_build_time_avg / step_count
3389    print
3390    print 'Average build time : %s' % datetime.timedelta(
3391        seconds=int(step_build_time_avg))
3392    print 'Average test time  : %s' % datetime.timedelta(
3393        seconds=int(step_perf_time_avg))
3394
3395  def _PrintWarnings(self):
3396    if not self.warnings:
3397      return
3398    print
3399    print 'WARNINGS:'
3400    for w in set(self.warnings):
3401      print '  !!! %s' % w
3402
3403  def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
3404    other_regressions = []
3405    previous_values = []
3406    previous_id = None
3407    for current_id, current_data in revision_data_sorted:
3408      current_values = current_data['value']
3409      if current_values:
3410        current_values = current_values['values']
3411        if previous_values:
3412          confidence = CalculateConfidence(previous_values, [current_values])
3413          mean_of_prev_runs = CalculateMean(sum(previous_values, []))
3414          mean_of_current_runs = CalculateMean(current_values)
3415
3416          # Check that the potential regression is in the same direction as
3417          # the overall regression. If the mean of the previous runs < the
3418          # mean of the current runs, this local regression is in same
3419          # direction.
3420          prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
3421          is_same_direction = (prev_less_than_current if
3422              bad_greater_than_good else not prev_less_than_current)
3423
3424          # Only report potential regressions with high confidence.
3425          if is_same_direction and confidence > 50:
3426            other_regressions.append([current_id, previous_id, confidence])
3427        previous_values.append(current_values)
3428        previous_id = current_id
3429    return other_regressions
3430
3431
3432  def _GetResultsDict(self, revision_data, revision_data_sorted):
3433    # Find range where it possibly broke.
3434    first_working_revision = None
3435    first_working_revision_index = -1
3436    last_broken_revision = None
3437    last_broken_revision_index = -1
3438
3439    for i in xrange(len(revision_data_sorted)):
3440      k, v = revision_data_sorted[i]
3441      if v['passed'] == 1:
3442        if not first_working_revision:
3443          first_working_revision = k
3444          first_working_revision_index = i
3445
3446      if not v['passed']:
3447        last_broken_revision = k
3448        last_broken_revision_index = i
3449
3450    if last_broken_revision != None and first_working_revision != None:
3451      broken_means = []
3452      for i in xrange(0, last_broken_revision_index + 1):
3453        if revision_data_sorted[i][1]['value']:
3454          broken_means.append(revision_data_sorted[i][1]['value']['values'])
3455
3456      working_means = []
3457      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
3458        if revision_data_sorted[i][1]['value']:
3459          working_means.append(revision_data_sorted[i][1]['value']['values'])
3460
3461      # Flatten the lists to calculate mean of all values.
3462      working_mean = sum(working_means, [])
3463      broken_mean = sum(broken_means, [])
3464
3465      # Calculate the approximate size of the regression
3466      mean_of_bad_runs = CalculateMean(broken_mean)
3467      mean_of_good_runs = CalculateMean(working_mean)
3468
3469      regression_size = 100 * CalculateRelativeChange(mean_of_good_runs,
3470                                                      mean_of_bad_runs)
3471      if math.isnan(regression_size):
3472        regression_size = 'zero-to-nonzero'
3473
3474      regression_std_err = math.fabs(CalculatePooledStandardError(
3475          [working_mean, broken_mean]) /
3476          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
3477
3478      # Give a "confidence" in the bisect. At the moment we use how distinct the
3479      # values are before and after the last broken revision, and how noisy the
3480      # overall graph is.
3481      confidence = CalculateConfidence(working_means, broken_means)
3482
3483      culprit_revisions = []
3484
3485      cwd = os.getcwd()
3486      self.ChangeToDepotWorkingDirectory(
3487          revision_data[last_broken_revision]['depot'])
3488
3489      if revision_data[last_broken_revision]['depot'] == 'cros':
3490        # Want to get a list of all the commits and what depots they belong
3491        # to so that we can grab info about each.
3492        cmd = ['repo', 'forall', '-c',
3493            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3494            last_broken_revision, first_working_revision + 1)]
3495        (output, return_code) = RunProcessAndRetrieveOutput(cmd)
3496
3497        changes = []
3498        assert not return_code, 'An error occurred while running'\
3499                                ' "%s"' % ' '.join(cmd)
3500        last_depot = None
3501        cwd = os.getcwd()
3502        for l in output.split('\n'):
3503          if l:
3504            # Output will be in form:
3505            # /path_to_depot
3506            # /path_to_other_depot
3507            # <SHA1>
3508            # /path_again
3509            # <SHA1>
3510            # etc.
3511            if l[0] == '/':
3512              last_depot = l
3513            else:
3514              contents = l.split(' ')
3515              if len(contents) > 1:
3516                changes.append([last_depot, contents[0]])
3517        for c in changes:
3518          os.chdir(c[0])
3519          info = self.source_control.QueryRevisionInfo(c[1])
3520          culprit_revisions.append((c[1], info, None))
3521      else:
3522        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
3523          k, v = revision_data_sorted[i]
3524          if k == first_working_revision:
3525            break
3526          self.ChangeToDepotWorkingDirectory(v['depot'])
3527          info = self.source_control.QueryRevisionInfo(k)
3528          culprit_revisions.append((k, info, v['depot']))
3529      os.chdir(cwd)
3530
3531      # Check for any other possible regression ranges
3532      other_regressions = self._FindOtherRegressions(revision_data_sorted,
3533          mean_of_bad_runs > mean_of_good_runs)
3534
3535    return {
3536        'first_working_revision': first_working_revision,
3537        'last_broken_revision': last_broken_revision,
3538        'culprit_revisions': culprit_revisions,
3539        'other_regressions': other_regressions,
3540        'regression_size': regression_size,
3541        'regression_std_err': regression_std_err,
3542        'confidence': confidence,
3543        }
3544
3545  def _CheckForWarnings(self, results_dict):
3546    if len(results_dict['culprit_revisions']) > 1:
3547      self.warnings.append('Due to build errors, regression range could '
3548                           'not be narrowed down to a single commit.')
3549    if self.opts.repeat_test_count == 1:
3550      self.warnings.append('Tests were only set to run once. This may '
3551                           'be insufficient to get meaningful results.')
3552    if results_dict['confidence'] < 100:
3553      if results_dict['confidence']:
3554        self.warnings.append(
3555            'Confidence is less than 100%. There could be other candidates '
3556            'for this regression. Try bisecting again with increased '
3557            'repeat_count or on a sub-metric that shows the regression more '
3558            'clearly.')
3559      else:
3560        self.warnings.append(
3561          'Confidence is 0%. Try bisecting again on another platform, with '
3562          'increased repeat_count or on a sub-metric that shows the '
3563          'regression more clearly.')
3564
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run.
    """
    revision_data = bisect_results['revision_data']
    # Sort into commit order using the 'sort' key assigned during the run.
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    self._CheckForWarnings(results_dict)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

    print
    print 'Full results of bisection:'
    for current_id, current_data  in revision_data_sorted:
      build_status = current_data['passed']

      # 'passed' may be a bool or a pre-formatted status string; map the
      # bool case to a readable label.
      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'

      print '  %20s  %40s  %s' % (current_data['depot'],
                                  current_id, build_status)
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')

    # Only print the success banner and culprit details when the bisect
    # both narrowed down culprits and has nonzero confidence in them.
    if results_dict['culprit_revisions'] and results_dict['confidence']:
      self._PrintBanner(results_dict)
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      self._PrintReproSteps()
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
                                    revision_data)
    else:
      self._PrintFailedBanner(results_dict)
      self._PrintReproSteps()

    self._PrintTestedCommitsTable(revision_data_sorted,
                                  results_dict['first_working_revision'],
                                  results_dict['last_broken_revision'],
                                  results_dict['confidence'])
    self._PrintStepTime(revision_data_sorted)
    self._PrintWarnings()

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
3624
3625
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Args:
    opts: The options parsed from the command line.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])
  is_git_checkout = output.strip() == 'true'
  return GitSourceControl(opts) if is_git_checkout else None
3641
3642
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
3655
3656
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  # 'except OSError, e' is deprecated comma syntax; the 'as' form is
  # valid on Python 2.6+ and Python 3.
  except OSError as e:
    # A directory that vanished out from under us is fine (nothing left to
    # remove); any other error aborts.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
3679
3680
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs.

  Returns:
    True if both the out/ and build/ directories for the given build type
    were successfully cleared.
  """
  out_dir = os.path.join('out', build_type)
  build_dir = os.path.join('build', build_type)
  return bool(RmTreeAndMkDir(out_dir) and RmTreeAndMkDir(build_dir))
3687
3688
3689class BisectOptions(object):
3690  """Options to be used when running bisection."""
3691  def __init__(self):
3692    super(BisectOptions, self).__init__()
3693
3694    self.target_platform = 'chromium'
3695    self.build_preference = None
3696    self.good_revision = None
3697    self.bad_revision = None
3698    self.use_goma = None
3699    self.cros_board = None
3700    self.cros_remote_ip = None
3701    self.repeat_test_count = 20
3702    self.truncate_percent = 25
3703    self.max_time_minutes = 20
3704    self.metric = None
3705    self.command = None
3706    self.output_buildbot_annotations = None
3707    self.no_custom_deps = False
3708    self.working_directory = None
3709    self.extra_src = None
3710    self.debug_ignore_build = None
3711    self.debug_ignore_sync = None
3712    self.debug_ignore_perf_test = None
3713    self.gs_bucket = None
3714    self.target_arch = 'ia32'
3715    self.target_build_type = 'Release'
3716    self.builder_host = None
3717    self.builder_port = None
3718    self.bisect_mode = BISECT_MODE_MEAN
3719
3720  def _CreateCommandLineParser(self):
3721    """Creates a parser with bisect options.
3722
3723    Returns:
3724      An instance of optparse.OptionParser.
3725    """
3726    usage = ('%prog [options] [-- chromium-options]\n'
3727             'Perform binary search on revision history to find a minimal '
3728             'range of revisions where a peformance metric regressed.\n')
3729
3730    parser = optparse.OptionParser(usage=usage)
3731
3732    group = optparse.OptionGroup(parser, 'Bisect options')
3733    group.add_option('-c', '--command',
3734                     type='str',
3735                     help='A command to execute your performance test at' +
3736                     ' each point in the bisection.')
3737    group.add_option('-b', '--bad_revision',
3738                     type='str',
3739                     help='A bad revision to start bisection. ' +
3740                     'Must be later than good revision. May be either a git' +
3741                     ' or svn revision.')
3742    group.add_option('-g', '--good_revision',
3743                     type='str',
3744                     help='A revision to start bisection where performance' +
3745                     ' test is known to pass. Must be earlier than the ' +
3746                     'bad revision. May be either a git or svn revision.')
3747    group.add_option('-m', '--metric',
3748                     type='str',
3749                     help='The desired metric to bisect on. For example ' +
3750                     '"vm_rss_final_b/vm_rss_f_b"')
3751    group.add_option('-r', '--repeat_test_count',
3752                     type='int',
3753                     default=20,
3754                     help='The number of times to repeat the performance '
3755                     'test. Values will be clamped to range [1, 100]. '
3756                     'Default value is 20.')
3757    group.add_option('--max_time_minutes',
3758                     type='int',
3759                     default=20,
3760                     help='The maximum time (in minutes) to take running the '
3761                     'performance tests. The script will run the performance '
3762                     'tests according to --repeat_test_count, so long as it '
3763                     'doesn\'t exceed --max_time_minutes. Values will be '
3764                     'clamped to range [1, 60].'
3765                     'Default value is 20.')
3766    group.add_option('-t', '--truncate_percent',
3767                     type='int',
3768                     default=25,
3769                     help='The highest/lowest % are discarded to form a '
3770                     'truncated mean. Values will be clamped to range [0, '
3771                     '25]. Default value is 25 (highest/lowest 25% will be '
3772                     'discarded).')
3773    group.add_option('--bisect_mode',
3774                     type='choice',
3775                     choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
3776                        BISECT_MODE_RETURN_CODE],
3777                     default=BISECT_MODE_MEAN,
3778                     help='The bisect mode. Choices are to bisect on the '
3779                     'difference in mean, std_dev, or return_code.')
3780    parser.add_option_group(group)
3781
3782    group = optparse.OptionGroup(parser, 'Build options')
3783    group.add_option('-w', '--working_directory',
3784                     type='str',
3785                     help='Path to the working directory where the script '
3786                     'will do an initial checkout of the chromium depot. The '
3787                     'files will be placed in a subdirectory "bisect" under '
3788                     'working_directory and that will be used to perform the '
3789                     'bisection. This parameter is optional, if it is not '
3790                     'supplied, the script will work from the current depot.')
3791    group.add_option('--build_preference',
3792                     type='choice',
3793                     choices=['msvs', 'ninja', 'make'],
3794                     help='The preferred build system to use. On linux/mac '
3795                     'the options are make/ninja. On Windows, the options '
3796                     'are msvs/ninja.')
3797    group.add_option('--target_platform',
3798                     type='choice',
3799                     choices=['chromium', 'cros', 'android', 'android-chrome'],
3800                     default='chromium',
3801                     help='The target platform. Choices are "chromium" '
3802                     '(current platform), "cros", or "android". If you '
3803                     'specify something other than "chromium", you must be '
3804                     'properly set up to build that platform.')
3805    group.add_option('--no_custom_deps',
3806                     dest='no_custom_deps',
3807                     action="store_true",
3808                     default=False,
3809                     help='Run the script with custom_deps or not.')
3810    group.add_option('--extra_src',
3811                     type='str',
3812                     help='Path to a script which can be used to modify '
3813                     'the bisect script\'s behavior.')
3814    group.add_option('--cros_board',
3815                     type='str',
3816                     help='The cros board type to build.')
3817    group.add_option('--cros_remote_ip',
3818                     type='str',
3819                     help='The remote machine to image to.')
3820    group.add_option('--use_goma',
3821                     action="store_true",
3822                     help='Add a bunch of extra threads for goma, and enable '
3823                     'goma')
3824    group.add_option('--output_buildbot_annotations',
3825                     action="store_true",
3826                     help='Add extra annotation output for buildbot.')
3827    group.add_option('--gs_bucket',
3828                     default='',
3829                     dest='gs_bucket',
3830                     type='str',
3831                     help=('Name of Google Storage bucket to upload or '
3832                     'download build. e.g., chrome-perf'))
3833    group.add_option('--target_arch',
3834                     type='choice',
3835                     choices=['ia32', 'x64', 'arm'],
3836                     default='ia32',
3837                     dest='target_arch',
3838                     help=('The target build architecture. Choices are "ia32" '
3839                     '(default), "x64" or "arm".'))
3840    group.add_option('--target_build_type',
3841                     type='choice',
3842                     choices=['Release', 'Debug'],
3843                     default='Release',
3844                     help='The target build type. Choices are "Release" '
3845                     '(default), or "Debug".')
3846    group.add_option('--builder_host',
3847                     dest='builder_host',
3848                     type='str',
3849                     help=('Host address of server to produce build by posting'
3850                           ' try job request.'))
3851    group.add_option('--builder_port',
3852                     dest='builder_port',
3853                     type='int',
3854                     help=('HTTP port of the server to produce build by posting'
3855                           ' try job request.'))
3856    parser.add_option_group(group)
3857
3858    group = optparse.OptionGroup(parser, 'Debug options')
3859    group.add_option('--debug_ignore_build',
3860                     action="store_true",
3861                     help='DEBUG: Don\'t perform builds.')
3862    group.add_option('--debug_ignore_sync',
3863                     action="store_true",
3864                     help='DEBUG: Don\'t perform syncs.')
3865    group.add_option('--debug_ignore_perf_test',
3866                     action="store_true",
3867                     help='DEBUG: Don\'t perform performance tests.')
3868    parser.add_option_group(group)
3869    return parser
3870
3871  def ParseCommandLine(self):
3872    """Parses the command line for bisect options."""
3873    parser = self._CreateCommandLineParser()
3874    (opts, _) = parser.parse_args()
3875
3876    try:
3877      if not opts.command:
3878        raise RuntimeError('missing required parameter: --command')
3879
3880      if not opts.good_revision:
3881        raise RuntimeError('missing required parameter: --good_revision')
3882
3883      if not opts.bad_revision:
3884        raise RuntimeError('missing required parameter: --bad_revision')
3885
3886      if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3887        raise RuntimeError('missing required parameter: --metric')
3888
3889      if opts.gs_bucket:
3890        if not cloud_storage.List(opts.gs_bucket):
3891          raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3892        if not opts.builder_host:
3893          raise RuntimeError('Must specify try server hostname, when '
3894                             'gs_bucket is used: --builder_host')
3895        if not opts.builder_port:
3896          raise RuntimeError('Must specify try server port number, when '
3897                             'gs_bucket is used: --builder_port')
3898      if opts.target_platform == 'cros':
3899        # Run sudo up front to make sure credentials are cached for later.
3900        print 'Sudo is required to build cros:'
3901        print
3902        RunProcess(['sudo', 'true'])
3903
3904        if not opts.cros_board:
3905          raise RuntimeError('missing required parameter: --cros_board')
3906
3907        if not opts.cros_remote_ip:
3908          raise RuntimeError('missing required parameter: --cros_remote_ip')
3909
3910        if not opts.working_directory:
3911          raise RuntimeError('missing required parameter: --working_directory')
3912
3913      metric_values = opts.metric.split('/')
3914      if (len(metric_values) != 2 and
3915          opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3916        raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3917
3918      opts.metric = metric_values
3919      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3920      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3921      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3922      opts.truncate_percent = opts.truncate_percent / 100.0
3923
3924      for k, v in opts.__dict__.iteritems():
3925        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
3926        setattr(self, k, v)
3927    except RuntimeError, e:
3928      output_string = StringIO.StringIO()
3929      parser.print_help(file=output_string)
3930      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3931      output_string.close()
3932      raise RuntimeError(error_message)
3933
3934  @staticmethod
3935  def FromDict(values):
3936    """Creates an instance of BisectOptions with the values parsed from a
3937    .cfg file.
3938
3939    Args:
3940      values: a dict containing options to set.
3941
3942    Returns:
3943      An instance of BisectOptions.
3944    """
3945    opts = BisectOptions()
3946    for k, v in values.iteritems():
3947      assert hasattr(opts, k), 'Invalid %s attribute in '\
3948          'BisectOptions.' % k
3949      setattr(opts, k, v)
3950
3951    metric_values = opts.metric.split('/')
3952    if len(metric_values) != 2:
3953      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3954
3955    opts.metric = metric_values
3956    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3957    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3958    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3959    opts.truncate_percent = opts.truncate_percent / 100.0
3960
3961    return opts
3962
3963
3964def main():
3965
3966  try:
3967    opts = BisectOptions()
3968    opts.ParseCommandLine()
3969
3970    if opts.extra_src:
3971      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
3972      if not extra_src:
3973        raise RuntimeError("Invalid or missing --extra_src.")
3974      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
3975
3976    if opts.working_directory:
3977      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
3978      if opts.no_custom_deps:
3979        custom_deps = None
3980      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
3981
3982      os.chdir(os.path.join(os.getcwd(), 'src'))
3983
3984      if not RemoveBuildFiles(opts.target_build_type):
3985        raise RuntimeError('Something went wrong removing the build files.')
3986
3987    if not IsPlatformSupported(opts):
3988      raise RuntimeError("Sorry, this platform isn't supported yet.")
3989
3990    # Check what source control method they're using. Only support git workflow
3991    # at the moment.
3992    source_control = DetermineAndCreateSourceControl(opts)
3993
3994    if not source_control:
3995      raise RuntimeError("Sorry, only the git workflow is supported at the "
3996          "moment.")
3997
3998    # gClient sync seems to fail if you're not in master branch.
3999    if (not source_control.IsInProperBranch() and
4000        not opts.debug_ignore_sync and
4001        not opts.working_directory):
4002      raise RuntimeError("You must switch to master branch to run bisection.")
4003    bisect_test = BisectPerformanceMetrics(source_control, opts)
4004    try:
4005      bisect_results = bisect_test.Run(opts.command,
4006                                       opts.bad_revision,
4007                                       opts.good_revision,
4008                                       opts.metric)
4009      if bisect_results['error']:
4010        raise RuntimeError(bisect_results['error'])
4011      bisect_test.FormatAndPrintResults(bisect_results)
4012      return 0
4013    finally:
4014      bisect_test.PerformCleanup()
4015  except RuntimeError, e:
4016    if opts.output_buildbot_annotations:
4017      # The perf dashboard scrapes the "results" step in order to comment on
4018      # bugs. If you change this, please update the perf dashboard as well.
4019      bisect_utils.OutputAnnotationStepStart('Results')
4020    print 'Error: %s' % e.message
4021    if opts.output_buildbot_annotations:
4022      bisect_utils.OutputAnnotationStepClosed()
4023  return 1
4024
# Script entry point: the process exit code is main()'s return value
# (0 on success, 1 on error).
if __name__ == '__main__':
  sys.exit(main())
4027