# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Records Web Page Replay (WPR) archives for a benchmark or a story set.

Runs the target benchmark/story set with the browser's WPR mode forced to
record, then adds the resulting recordings to the story set's archive info
(optionally uploading them to cloud storage).
"""

import argparse
import logging
import sys

from telemetry import benchmark
from telemetry import story
from telemetry.core import discover
from telemetry.internal.browser import browser_options
from telemetry.internal.results import results_options
from telemetry.internal import story_runner
from telemetry.internal.util import binary_manager
from telemetry.page import legacy_page_test
from telemetry.util import matching
from telemetry.util import wpr_modes
from telemetry.web_perf import timeline_based_measurement
from telemetry.web_perf import timeline_based_page_test

import py_utils

DEFAULT_LOG_FORMAT = (
    '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
    '%(message)s')


class RecorderPageTest(legacy_page_test.LegacyPageTest):
  """Page test used while recording a WPR archive.

  Optionally wraps a benchmark-supplied page test (``self.page_test``) so
  that the benchmark's browser customizations and per-page hooks still run
  during recording; every hook below delegates to it when it is set.
  """

  def __init__(self):
    super(RecorderPageTest, self).__init__()
    # Benchmark's own page test to delegate hooks to; None when recording
    # a bare story set.
    self.page_test = None
    # OS name of the browser used for this recording; set on first browser
    # start and asserted equal on later starts so one archive never mixes
    # recordings from different platforms.
    self.platform = None

  def CustomizeBrowserOptions(self, options):
    if self.page_test:
      self.page_test.CustomizeBrowserOptions(options)

  def WillStartBrowser(self, browser):
    # All browsers used within a single recording must report the same OS.
    if self.platform is not None:
      assert browser.GetOSName() == self.platform
    self.platform = browser.GetOSName()
    if self.page_test:
      self.page_test.WillStartBrowser(browser)

  def DidStartBrowser(self, browser):
    if self.page_test:
      self.page_test.DidStartBrowser(browser)

  def WillNavigateToPage(self, page, tab):
    """Override to ensure all resources are fetched from network."""
    # A warm cache would prevent requests from reaching the network, so the
    # recording would miss those resources.
    tab.ClearCache(force=False)
    if self.page_test:
      self.page_test.WillNavigateToPage(page, tab)

  def DidNavigateToPage(self, page, tab):
    if self.page_test:
      self.page_test.DidNavigateToPage(page, tab)
    # Wait (up to 30s) for the page to settle so the archive captures the
    # full set of resources the page loads.
    tab.WaitForDocumentReadyStateToBeComplete()
    py_utils.WaitFor(tab.HasReachedQuiescence, 30)

  def CleanUpAfterPage(self, page, tab):
    if self.page_test:
      self.page_test.CleanUpAfterPage(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    if self.page_test:
      self.page_test.ValidateAndMeasurePage(page, tab, results)

  def RunNavigateSteps(self, page, tab):
    # Prefer the wrapped test's navigation (it may have custom steps);
    # otherwise use the default navigation.
    if self.page_test:
      self.page_test.RunNavigateSteps(page, tab)
    else:
      super(RecorderPageTest, self).RunNavigateSteps(page, tab)


def _GetSubclasses(base_dir, cls):
  """Returns all subclasses of |cls| in |base_dir|.

  Args:
    base_dir: directory tree to search for subclasses.
    cls: a class

  Returns:
    dict of {underscored_class_name: benchmark class}
  """
  return discover.DiscoverClasses(base_dir, base_dir, cls,
                                  index_by_class_name=True)


def _MaybeGetInstanceOfClass(target, base_dir, cls):
  """Resolves |target| to an instance of |cls|, or None.

  If |target| is already an instance of |cls| it is returned as-is;
  otherwise it is treated as an underscored class name and looked up among
  subclasses of |cls| discovered under |base_dir|.
  """
  if isinstance(target, cls):
    return target
  classes = _GetSubclasses(base_dir, cls)
  return classes[target]() if target in classes else None


def _PrintAllImpl(all_items, item_name, output_stream):
  # Prints each discovered name with its Description(), sorted by name.
  output_stream.write('Available %s\' names with descriptions:\n' % item_name)
  keys = sorted(all_items.keys())
  key_description = [(k, all_items[k].Description()) for k in keys]
  _PrintPairs(key_description, output_stream)
  output_stream.write('\n')


def _PrintAllBenchmarks(base_dir, output_stream):
  # TODO: reuse the logic of finding supported benchmarks in benchmark_runner.py
  # so this only prints out benchmarks that are supported by the recording
  # platform.
  _PrintAllImpl(_GetSubclasses(base_dir, benchmark.Benchmark), 'benchmarks',
                output_stream)


def _PrintAllStories(base_dir, output_stream):
  # TODO: actually print all stories once record_wpr support general
  # stories recording.
  _PrintAllImpl(_GetSubclasses(base_dir, story.StorySet), 'story sets',
                output_stream)


def _PrintPairs(pairs, output_stream, prefix=''):
  """Prints a list of string pairs with alignment."""
  first_column_length = max(len(a) for a, _ in pairs)
  format_string = '%s%%-%ds %%s\n' % (prefix, first_column_length)
  for a, b in pairs:
    output_stream.write(format_string % (a, b.strip()))


class WprRecorder(object):
  """Drives a WPR recording run for one benchmark or story set.

  Resolves |target| to either a benchmark or a story set under |base_dir|,
  parses the command line, and exposes Record()/HandleResults() to perform
  the recording and persist the resulting archives.
  """

  def __init__(self, base_dir, target, args=None):
    self._base_dir = base_dir
    self._record_page_test = RecorderPageTest()
    # Browser options with wpr_mode forced to WPR_RECORD.
    self._options = self._CreateOptions()

    # None when |target| names a story set rather than a benchmark.
    self._benchmark = _MaybeGetInstanceOfClass(target, base_dir,
                                               benchmark.Benchmark)
    self._parser = self._options.CreateParser(usage='See %prog --help')
    self._AddCommandLineArgs()
    self._ParseArgs(args)
    self._ProcessCommandLineArgs()
    if self._benchmark is not None:
      test = self._benchmark.CreatePageTest(self.options)
      # TBM measurements need a legacy page-test adapter so they can be
      # wrapped by RecorderPageTest.
      if isinstance(test, timeline_based_measurement.TimelineBasedMeasurement):
        test = timeline_based_page_test.TimelineBasedPageTest(test)
      # This must be called after the command line args are added.
      self._record_page_test.page_test = test

    self._page_set_base_dir = (
        self._options.page_set_base_dir if self._options.page_set_base_dir
        else self._base_dir)
    self._story_set = self._GetStorySet(target)

  @property
  def options(self):
    return self._options

  def _CreateOptions(self):
    options = browser_options.BrowserFinderOptions()
    options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
    return options

  def CreateResults(self):
    # Use the benchmark's metadata when recording for a benchmark;
    # otherwise attribute results to 'record_wpr' itself.
    if self._benchmark is not None:
      benchmark_metadata = self._benchmark.GetMetadata()
    else:
      benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')

    return results_options.CreateResults(benchmark_metadata, self._options)

  def _AddCommandLineArgs(self):
    self._parser.add_option('--page-set-base-dir', action='store',
                            type='string')
    story_runner.AddCommandLineArgs(self._parser)
    if self._benchmark is not None:
      self._benchmark.AddCommandLineArgs(self._parser)
      self._benchmark.SetArgumentDefaults(self._parser)
    self._parser.add_option('--upload', action='store_true')
    self._SetArgumentDefaults()

  def _SetArgumentDefaults(self):
    # Recording runs don't need any result output by default.
    self._parser.set_defaults(**{'output_formats': ['none']})

  def _ParseArgs(self, args=None):
    args_to_parse = sys.argv[1:] if args is None else args
    self._parser.parse_args(args_to_parse)

  def _ProcessCommandLineArgs(self):
    story_runner.ProcessCommandLineArgs(self._parser, self._options)

    # --use-live-sites bypasses WPR entirely, which contradicts recording.
    if self._options.use_live_sites:
      self._parser.error("Can't --use-live-sites while recording")

    if self._benchmark is not None:
      self._benchmark.ProcessCommandLineArgs(self._parser, self._options)

  def _GetStorySet(self, target):
    """Returns the story set to record, exiting with a hint on failure."""
    if self._benchmark is not None:
      return self._benchmark.CreateStorySet(self._options)
    story_set = _MaybeGetInstanceOfClass(target, self._page_set_base_dir,
                                         story.StorySet)
    if story_set is None:
      sys.stderr.write('Target %s is neither benchmark nor story set.\n'
                       % target)
      # Suggest close name matches before giving up.
      if not self._HintMostLikelyBenchmarksStories(target):
        sys.stderr.write(
            'Found no similar benchmark or story. Please use '
            '--list-benchmarks or --list-stories to list candidates.\n')
        self._parser.print_usage()
      sys.exit(1)
    return story_set

  def _HintMostLikelyBenchmarksStories(self, target):
    """Prints benchmarks/stories whose names resemble |target|.

    Returns:
      True if at least one candidate hint was printed.
    """
    def _Impl(all_items, category_name):
      candidates = matching.GetMostLikelyMatchedObject(
          all_items.iteritems(), target, name_func=lambda kv: kv[1].Name())
      if candidates:
        sys.stderr.write('\nDo you mean any of those %s below?\n' %
                         category_name)
        _PrintPairs([(k, v.Description()) for k, v in candidates], sys.stderr)
        return True
      return False

    has_benchmark_hint = _Impl(
        _GetSubclasses(self._base_dir, benchmark.Benchmark), 'benchmarks')
    has_story_hint = _Impl(
        _GetSubclasses(self._base_dir, story.StorySet), 'stories')
    return has_benchmark_hint or has_story_hint

  def Record(self, results):
    """Runs the story set once, recording all traffic into a new archive."""
    assert self._story_set.wpr_archive_info, (
        'Pageset archive_data_file path must be specified.')
    self._story_set.wpr_archive_info.AddNewTemporaryRecording()
    self._record_page_test.CustomizeBrowserOptions(self._options)
    story_runner.Run(self._record_page_test, self._story_set,
                     self._options, results)

  def HandleResults(self, results, upload_to_cloud_storage):
    """Persists recordings for pages that succeeded; warns about the rest."""
    if results.failures or results.skipped_values:
      logging.warning('Some pages failed and/or were skipped. The recording '
                      'has not been updated for these pages.')
    results.PrintSummary()
    # Only successful pages get their archive entries updated; the platform
    # recorded by RecorderPageTest is stored alongside them.
    self._story_set.wpr_archive_info.AddRecordedStories(
        results.pages_that_succeeded,
        upload_to_cloud_storage,
        target_platform=self._record_page_test.platform)


def Main(environment, **log_config_kwargs):
  """Entry point: records WPR archives for the target named on the CLI.

  Args:
    environment: provides top_level_dir (discovery root) and client_configs
        (for binary_manager initialization).
    **log_config_kwargs: forwarded to logging.basicConfig; 'level' is
        dropped because the log level is set via browser_options.

  Returns:
    Process exit code: 0 on success/informational exits, otherwise the
    number of failed pages capped at 255 (valid exit-status range).
  """
  # the log level is set in browser_options
  log_config_kwargs.pop('level', None)
  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
  logging.basicConfig(**log_config_kwargs)

  parser = argparse.ArgumentParser(
      usage='Record a benchmark or a story (page set).')
  parser.add_argument(
      'benchmark',
      help=('benchmark name. This argument is optional. If both benchmark name '
            'and story name are specified, this takes precedence as the '
            'target of the recording.'),
      nargs='?')
  parser.add_argument('--story', help='story (page set) name')
  parser.add_argument('--list-stories', dest='list_stories',
                      action='store_true', help='list all story names.')
  parser.add_argument('--list-benchmarks', dest='list_benchmarks',
                      action='store_true', help='list all benchmark names.')
  parser.add_argument('--upload', action='store_true',
                      help='upload to cloud storage.')
  # Unrecognized args are forwarded to WprRecorder's own option parser.
  args, extra_args = parser.parse_known_args()

  if args.list_benchmarks or args.list_stories:
    if args.list_benchmarks:
      _PrintAllBenchmarks(environment.top_level_dir, sys.stderr)
    if args.list_stories:
      _PrintAllStories(environment.top_level_dir, sys.stderr)
    return 0

  # Benchmark name takes precedence over --story when both are given.
  target = args.benchmark or args.story

  if not target:
    sys.stderr.write('Please specify target (benchmark or story). '
                     'Please refer usage below\n\n')
    parser.print_help()
    return 0

  binary_manager.InitDependencyManager(environment.client_configs)

  # TODO(nednguyen): update WprRecorder so that it handles the difference
  # between recording a benchmark vs recording a story better based on
  # the distinction between args.benchmark & args.story
  wpr_recorder = WprRecorder(environment.top_level_dir, target, extra_args)
  results = wpr_recorder.CreateResults()
  wpr_recorder.Record(results)
  wpr_recorder.HandleResults(results, args.upload)
  return min(255, len(results.failures))