# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
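"""Unit tests for the Speedometer 2.0, 2.1 and 3.0 benchmarks in crossbench."""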

import argparse
import datetime as dt
import json
from dataclasses import dataclass

from crossbench.benchmarks.speedometer.speedometer_2_0 import (
    Speedometer20Benchmark, Speedometer20Probe, Speedometer20Story)
from crossbench.benchmarks.speedometer.speedometer_2_1 import (
    Speedometer21Benchmark, Speedometer21Probe, Speedometer21Story)
from crossbench.benchmarks.speedometer.speedometer_3_0 import (
    MeasurementMethod, Speedometer30Benchmark, Speedometer30Probe,
    Speedometer30Story)
from crossbench.browsers.viewport import Viewport
from tests import test_helper
from tests.crossbench.benchmarks.speedometer_helper import (
    Speedometer2BaseTestCase, SpeedometerBaseTestCase)


class Speedometer20TestCase(Speedometer2BaseTestCase):

  @property
  def benchmark_cls(self):
    return Speedometer20Benchmark

  @property
  def story_cls(self):
    return Speedometer20Story

  @property
  def probe_cls(self):
    return Speedometer20Probe

  @property
  def name(self):
    return "speedometer_2.0"

  def test_default_all(self):
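    """For Speedometer 2.0 the default story set equals the full story set."""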
    default_story_names = [
        story.name for story in self.story_cls.default(separate=True)
    ]
    all_story_names = [
        story.name for story in self.story_cls.all(separate=True)
    ]
    self.assertListEqual(default_story_names, all_story_names)


class Speedometer21TestCase(Speedometer2BaseTestCase):

  @property
  def benchmark_cls(self):
    return Speedometer21Benchmark

  @property
  def story_cls(self):
    return Speedometer21Story

  @property
  def probe_cls(self):
    return Speedometer21Probe

  @property
  def name(self):
    return "speedometer_2.1"


class Speedometer30TestCase(SpeedometerBaseTestCase):

  @property
  def benchmark_cls(self):
    return Speedometer30Benchmark

  @property
  def story_cls(self):
    return Speedometer30Story

  @property
  def probe_cls(self):
    return Speedometer30Probe

  @property
  def name(self):
    return "speedometer_3.0"

  @property
  def name_all(self):
    return "all"

  @dataclass
  class Namespace(SpeedometerBaseTestCase.Namespace):
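    """Stand-in for parsed CLI arguments with Speedometer 3.0 defaults."""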
    sync_wait = dt.timedelta(0)
    sync_warmup = dt.timedelta(0)
    measurement_method = MeasurementMethod.RAF
    story_viewport = None
    shuffle_seed = None
    detailed_metrics = False

  EXAMPLE_STORY_DATA = {}

  def _generate_s3_metrics(self, name, values):
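    """Returns a Speedometer 3 style metric entry for `name`.

    The aggregate fields are fixed mock numbers; only `name` and `values`
    vary per entry.
    """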
    return {
        "children": [],
        "delta": 0,
        "geomean": 39.20000000298023,
        "max": 39.20000000298023,
        "mean": 39.20000000298023,
        "min": 39.20000000298023,
        "name": name,
        "percentDelta": 0,
        "sum": 39.20000000298023,
        "unit": "ms",
        "values": values
    }

  def _generate_test_probe_results(self, iterations, story):
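    """Returns mock probe results with per-iteration totals, per-substory
    entries and the aggregate Geomean and Score metrics."""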
    values = [21.3] * iterations
    probe_result = {
        "Geomean": self._generate_s3_metrics("Geomean", values),
        "Score": self._generate_s3_metrics("Score", values),
    }
    for iteration in range(iterations):
      key = f"Iteration-{iteration}-Total"
      probe_result[key] = self._generate_s3_metrics(key, values)

    for substory_name in story.substories:
      probe_result[substory_name] = self._generate_s3_metrics(
          substory_name, values)
    return probe_result

  def test_run_combined(self):
    self._run_combined(["TodoMVC-JavaScript-ES5", "TodoMVC-Backbone"])

  def test_run_separate(self):
    self._run_separate(["TodoMVC-JavaScript-ES5", "TodoMVC-Backbone"])

  def test_s3_probe_results(self):
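    """Checks that the probe JSON keys are identical across repetitions and
    that the aggregate metrics come after the per-story entries."""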
    story_names = ("TodoMVC-JavaScript-ES5", "TodoMVC-Backbone")
    self.browsers = [self.browsers[0]]
    runner = self._test_run(
        story_names=story_names, separate=False, repetitions=2)
    self.assertEqual(len(runner.runs), 2)
    run_1 = runner.runs[0]
    run_2 = runner.runs[1]
    probe_file = f"{self.probe_cls.NAME}.json"
    with (run_1.out_dir / probe_file).open() as f:
      data_1 = json.load(f)
    with (run_2.out_dir / probe_file).open() as f:
      data_2 = json.load(f)
    keys_1 = tuple(data_1.keys())
    keys_2 = tuple(data_2.keys())
    self.assertTupleEqual(keys_1, keys_2)
    # Make sure the aggregate metrics are at the end.
    expected_keys = story_names + ("Iteration-0-Total", "Iteration-1-Total",
                                   "Geomean", "Score")
    self.assertTupleEqual(keys_1, expected_keys)

    with (runner.story_groups[0].path / probe_file).open() as f:
      stories_data = json.load(f)
    self.assertTupleEqual(tuple(stories_data.keys()), expected_keys)

  def test_measurement_method_kwargs(self):
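    """The default measurement method is rAF; `timer` is passed through as
    the `measurementMethod` URL parameter."""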
    args = self.Namespace()
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.measurement_method, MeasurementMethod.RAF)

    args.measurement_method = MeasurementMethod.TIMER
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.measurement_method, MeasurementMethod.TIMER)
      self.assertDictEqual(story.url_params, {"measurementMethod": "timer"})

  def test_sync_wait_kwargs(self):
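    """Negative sync-wait values are rejected; valid values end up in the
    `waitBeforeSync` URL parameter as milliseconds."""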
    args = self.Namespace()
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.sync_wait, dt.timedelta(0))

    with self.assertRaises(argparse.ArgumentTypeError):
      args.sync_wait = dt.timedelta(seconds=-123.4)
      self.benchmark_cls.from_cli_args(args)

    args.sync_wait = dt.timedelta(seconds=123.4)
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.sync_wait, dt.timedelta(seconds=123.4))
      self.assertDictEqual(story.url_params, {"waitBeforeSync": "123400"})

  def test_sync_warmup_kwargs(self):
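    """Negative warmup values are rejected; valid values end up in the
    `warmupBeforeSync` URL parameter as milliseconds."""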
    args = self.Namespace()
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.sync_warmup, dt.timedelta(0))

    with self.assertRaises(argparse.ArgumentTypeError):
      args.sync_warmup = dt.timedelta(seconds=-123.4)
      self.benchmark_cls.from_cli_args(args)

    args.sync_warmup = dt.timedelta(seconds=123.4)
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.sync_warmup, dt.timedelta(seconds=123.4))
      self.assertDictEqual(story.url_params, {"warmupBeforeSync": "123400"})

  def test_viewport_kwargs(self):
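    """Fullscreen story viewports are rejected; explicit sizes map to the
    `viewport` URL parameter as WIDTHxHEIGHT."""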
    args = self.Namespace()
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.viewport, None)

    with self.assertRaises(argparse.ArgumentTypeError):
      args.story_viewport = Viewport.FULLSCREEN
      self.benchmark_cls.from_cli_args(args)

    args.story_viewport = Viewport(999, 888)
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.viewport, Viewport(999, 888))
      self.assertDictEqual(story.url_params, {"viewport": "999x888"})

  def test_shuffle_seed_kwargs(self):
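    """Invalid shuffle seeds are rejected; integer seeds map to the
    `shuffleSeed` URL parameter."""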
    args = self.Namespace()
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.shuffle_seed, None)

    with self.assertRaises(argparse.ArgumentTypeError):
      args.shuffle_seed = "some invalid value"
      self.benchmark_cls.from_cli_args(args)

    args.shuffle_seed = 1234
    benchmark = self.benchmark_cls.from_cli_args(args)
    for story in benchmark.stories:
      assert isinstance(story, self.story_cls)
      self.assertEqual(story.shuffle_seed, 1234)
      self.assertDictEqual(story.url_params, {"shuffleSeed": "1234"})


# Don't expose the abstract base test cases to the test runner.
del SpeedometerBaseTestCase
del Speedometer2BaseTestCase

if __name__ == "__main__":
  test_helper.run_pytest(__file__)