# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import annotations

import abc
import datetime as dt
import json
import logging
from typing import (TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple,
                    Type)

from crossbench import helper
from crossbench.benchmarks.base import (BenchmarkProbeMixin, PressBenchmark,
                                        PressBenchmarkStoryFilter)
from crossbench.parse import NumberParser
from crossbench.probes.helper import Flatten
from crossbench.probes.json import JsonResultProbe
from crossbench.probes.metric import Metric, MetricsMerger
from crossbench.probes.results import ProbeResult, ProbeResultDict
from crossbench.stories.press_benchmark import PressBenchmarkStory

if TYPE_CHECKING:
  import argparse

  from crossbench.path import LocalPath
  from crossbench.runner.actions import Actions
  from crossbench.runner.groups.browsers import BrowsersRunGroup
  from crossbench.runner.groups.stories import StoriesRunGroup
  from crossbench.runner.run import Run
  from crossbench.types import Json


def _probe_remove_tests_segments(path: Tuple[str, ...]) -> str:
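  """Flatten a metric path into a single "/"-joined name, dropping the
  redundant "tests" segments, e.g. ("Suite", "tests", "subtest", "total")
  becomes "Suite/subtest/total".
  """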
  return "/".join(segment for segment in path if segment != "tests")


class SpeedometerProbe(
    BenchmarkProbeMixin, JsonResultProbe, metaclass=abc.ABCMeta):
  """
  Speedometer-specific probe (compatible with v2.X and v3.X).
  Extracts all speedometer times and scores.
  """
  JS: str = "return window.suiteValues;"
  SORT_KEYS: bool = False

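  # window.suiteValues is populated with one measuredValues entry per
  # iteration by the patched benchmarkClient installed in
  # SpeedometerStory._setup_benchmark_client below.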
  def to_json(self, actions: Actions) -> Json:
    return actions.js(self.JS)

  def flatten_json_data(self, json_data: Any) -> Json:
    # json_data may contain multiple iterations, merge those first
    assert isinstance(json_data, list), f"Expected list, got {type(json_data)}"
    merged = MetricsMerger(
        json_data, key_fn=_probe_remove_tests_segments).to_json(
            value_fn=lambda values: values.geomean, sort=self.SORT_KEYS)
    return Flatten(merged, sort=self.SORT_KEYS).data

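  # Merge the per-repetition JSON results for each story into a single
  # group-level result.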
  def merge_stories(self, group: StoriesRunGroup) -> ProbeResult:
    merged = MetricsMerger.merge_json_list(
        repetitions_group.results[self].json
        for repetitions_group in group.repetitions_groups)
    return self.write_group_result(group, merged)

  def merge_browsers(self, group: BrowsersRunGroup) -> ProbeResult:
    return self.merge_browsers_json_list(group).merge(
        self.merge_browsers_csv_list(group))

  def log_run_result(self, run: Run) -> None:
    self._log_result(run.results, single_result=True)

  def log_browsers_result(self, group: BrowsersRunGroup) -> None:
    self._log_result(group.results, single_result=False)

  def _log_result(self, result_dict: ProbeResultDict,
                  single_result: bool) -> None:
    if self not in result_dict:
      return
    results_json: LocalPath = result_dict[self].json
    logging.info("-" * 80)
    logging.critical("Speedometer results:")
    if not single_result:
      logging.critical("  %s", result_dict[self].csv)
    logging.info("- " * 40)

    with results_json.open(encoding="utf-8") as f:
      data = json.load(f)
      if single_result:
        score = data.get("score") or data["Score"]
        logging.critical("Score %s", score)
      else:
        self._log_result_metrics(data)

  def _extract_result_metrics_table(self, metrics: Dict[str, Any],
                                    table: Dict[str, List[str]]) -> None:
    for metric_key, metric in metrics.items():
      if not self._is_valid_metric_key(metric_key):
        continue
      table[metric_key].append(
          Metric.format(metric["average"], metric["stddev"]))

  @abc.abstractmethod
  def _is_valid_metric_key(self, metric_key: str) -> bool:
    pass


class SpeedometerStory(PressBenchmarkStory, metaclass=abc.ABCMeta):
  URL_LOCAL: str = "http://localhost:8000/"
  DEFAULT_ITERATIONS: int = 10

  def __init__(self,
               substories: Sequence[str] = (),
               iterations: Optional[int] = None,
               url: Optional[str] = None):
    self._iterations: int = iterations or self.DEFAULT_ITERATIONS
    assert self.iterations >= 1, f"Invalid iterations count: '{iterations}'."
    super().__init__(url=url, substories=substories)

  @property
  def iterations(self) -> int:
    return self._iterations

  @property
  def substory_duration(self) -> dt.timedelta:
    return self.iterations * self.single_substory_duration

  @property
  def single_substory_duration(self) -> dt.timedelta:
    return dt.timedelta(seconds=0.4)

  @property
  def slow_duration(self) -> dt.timedelta:
    """Max duration that covers run-times on slow machines and/or
    debug-mode browsers.
    Making this number too large might cause needless wait times on broken
    browsers/benchmarks.
    """
    return dt.timedelta(seconds=60 * 20) + self.duration * 10

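  # Extra URL query parameters, e.g. {"iterationCount": "20"}, merged into
  # the benchmark URL when a non-default iteration count is requested.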
  @property
  def url_params(self) -> Dict[str, str]:
    if self.iterations == self.DEFAULT_ITERATIONS:
      return {}
    return {"iterationCount": str(self.iterations)}

  def setup(self, run: Run) -> None:
    updated_url = self.get_run_url(run)
    with run.actions("Setup") as actions:
      actions.show_url(updated_url)
      actions.wait_js_condition("return window.Suites !== undefined;", 0.5, 10)
      self._setup_substories(actions)
      self._setup_benchmark_client(actions)
      actions.wait(0.5)

  def get_run_url(self, run: Run) -> str:
    url = super().get_run_url(run)
    url = helper.update_url_query(url, self.url_params)
    if url != self.url:
      logging.info("CUSTOM URL: %s", url)
    return url

  def _setup_substories(self, actions: Actions) -> None:
    if self._substories == self.SUBSTORIES:
      return
    actions.js(
        """
        let substories = arguments[0];
        Suites.forEach((suite) => {
          suite.disabled = substories.indexOf(suite.name) === -1;
        });""",
        arguments=[self._substories])

  def _setup_benchmark_client(self, actions: Actions) -> None:
    actions.js("""
      window.testDone = false;
      window.suiteValues = [];
      const client = window.benchmarkClient;
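      // Keep references to the original benchmarkClient callbacks so the
      // patched versions below can still delegate to them.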
      const clientCopy = {
        didRunSuites: client.didRunSuites,
        didFinishLastIteration: client.didFinishLastIteration,
      };
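      // Record each iteration's measuredValues and flag completion so that
      // the probe (window.suiteValues) and the runner (window.testDone) can
      // observe the benchmark's progress.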
      client.didRunSuites = function(measuredValues, ...arguments) {
          clientCopy.didRunSuites.call(this, measuredValues, ...arguments);
          window.suiteValues.push(measuredValues);
      };
      client.didFinishLastIteration = function(...arguments) {
          clientCopy.didFinishLastIteration.call(this, ...arguments);
          window.testDone = true;
      };""")

  def run(self, run: Run) -> None:
    with run.actions("Running") as actions:
      actions.js("""
          if (window.startTest) {
            window.startTest();
          } else {
            // Interactive Runner fallback / old 3.0 fallback.
            let startButton = document.getElementById("runSuites") ||
                document.querySelector("start-tests-button") ||
                document.querySelector(".buttons button");
            startButton.click();
          }
          """)
      actions.wait(self.fast_duration)
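    # The minimum expected runtime has elapsed; now poll for the completion
    # flag set by the patched didFinishLastIteration callback.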
    with run.actions("Waiting for completion") as actions:
      actions.wait_js_condition(
          "return window.testDone",
          0.5,
          self.slow_duration,
          delay=self.substory_duration)


ProbeClsTupleT = Tuple[Type[SpeedometerProbe], ...]


class SpeedometerBenchmarkStoryFilter(PressBenchmarkStoryFilter):
  __doc__ = PressBenchmarkStoryFilter.__doc__

  @classmethod
  def add_cli_parser(
      cls, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    parser = super().add_cli_parser(parser)
    parser.add_argument(
        "--iterations",
        "--iteration-count",
        default=SpeedometerStory.DEFAULT_ITERATIONS,
        type=NumberParser.positive_int,
        help="Number of iterations each Speedometer subtest is run "
        "within the same session.\n"
        "Note: --repetitions restarts the whole benchmark, --iterations "
        "runs the same tests n-times within the same session without the "
        "setup overhead of starting up a whole new browser.")
    return parser

  @classmethod
  def kwargs_from_cli(cls, args: argparse.Namespace) -> Dict[str, Any]:
    kwargs = super().kwargs_from_cli(args)
    kwargs["iterations"] = args.iterations
    return kwargs

  def __init__(self,
               story_cls: Type[SpeedometerStory],
               patterns: Sequence[str],
               separate: bool = False,
               url: Optional[str] = None,
               iterations: Optional[int] = None):
    self.iterations = iterations
    assert issubclass(story_cls, SpeedometerStory)
    super().__init__(story_cls, patterns, separate, url)

  def create_stories_from_names(self, names: List[str],
                                separate: bool) -> Sequence[SpeedometerStory]:
    return self.story_cls.from_names(
        names, separate=separate, url=self.url, iterations=self.iterations)


class SpeedometerBenchmark(PressBenchmark, metaclass=abc.ABCMeta):

  DEFAULT_STORY_CLS = SpeedometerStory
  STORY_FILTER_CLS = SpeedometerBenchmarkStoryFilter

  @classmethod
  def short_base_name(cls) -> str:
    return "sp"

  @classmethod
  def base_name(cls) -> str:
    return "speedometer"
270