• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2023 The Chromium Authors
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5from __future__ import annotations
6
7import contextlib
8import io
9import pathlib
10from typing import List, Tuple
11from unittest import mock
12
13import pytest
14
15import crossbench.browsers.all as browsers
16from crossbench import plt
17from crossbench.cli.cli import CrossBenchCLI
18from tests import test_helper
19
20
class SysExitException(Exception):
  """Raised by the mocked sys.exit so tests can detect early CLI exits."""

  def __init__(self) -> None:
    super().__init__("sys.exit")
26
@pytest.fixture(autouse=True)
def cli_test_context(browser_path, driver_path):
  """Patch browser lookups so end2end CLI tests can run on the CQ.

  Chrome's stable path is always mocked to support the --test-browser-path
  option. The chromedriver download is additionally mocked only when a
  driver_path is given, because the CQ uses the latest canary, which might
  not have an easily publicly accessible chromedriver available.
  """
  with contextlib.ExitStack() as stack:
    stack.enter_context(
        mock.patch(
            "crossbench.browsers.all.Chrome.stable_path",
            return_value=browser_path))
    if driver_path:
      stack.enter_context(
          mock.patch(
              "crossbench.browsers.chromium.webdriver.ChromeDriverFinder.download",
              return_value=driver_path))
    yield
42
43
def _run_cli(*args: str) -> Tuple[CrossBenchCLI, io.StringIO]:
  """Run the crossbench CLI with the given args, capturing stdout.

  sys.exit is patched to raise SysExitException so tests can assert on
  (or survive) early CLI exits instead of killing the test process.
  """
  cli = CrossBenchCLI()
  stdout = io.StringIO()
  with contextlib.redirect_stdout(stdout), mock.patch(
      "sys.exit", side_effect=SysExitException):
    cli.run(args)
  return cli, stdout
50
51
52def _get_browser_dirs(results_dir: pathlib.Path) -> List[pathlib.Path]:
53  assert results_dir.is_dir()
54  browser_dirs = [path for path in results_dir.iterdir() if path.is_dir()]
55  return browser_dirs
56
57
58def _get_v8_log_files(results_dir: pathlib.Path) -> List[pathlib.Path]:
59  return list(results_dir.glob("**/*-v8.log"))
60
61
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_speedometer_2_0(output_dir, cache_dir, root_dir) -> None:
  """Covers:
  - Speedometer 2.0
  - Speedometer --iterations flag
  - Tracing probe with inline args
  - --browser-config
  """
  with pytest.raises(SysExitException):
    _run_cli("speedometer_2.0", "--help")
  _run_cli("describe", "benchmark", "speedometer_2.0")
  browser_config = root_dir / "config/doc/browser.config.hjson"
  assert browser_config.is_file()
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "sp_2.0",
      f"--browser-config={browser_config}",
      "--iterations=2",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--probe=tracing:{preset:'minimal'}",
  ]
  _run_cli(*cli_args)
84
85
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_speedometer_2_1(output_dir, cache_dir) -> None:
  """Covers:
  - Speedometer 2.1
  - Story filtering with regexp
  - V8 probes
  - minimal splashscreen
  - inline probe arguments
  """
  with pytest.raises(SysExitException):
    _run_cli("speedometer_2.1", "--help")
  _run_cli("describe", "benchmark", "speedometer_2.1")
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "sp2.1",
      "--browser=chrome-stable",
      "--splashscreen=minimal",
      "--iterations=2",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--stories=.*Vanilla.*",
      # V8 --prof doesn't always work on linux, skip it.
      "--probe=v8.log:{log_all:false, js_flags:['--log-maps'], prof:false, profview:false}",
      "--probe=v8.turbolizer",
      "--debug",
  ]
  _run_cli(*cli_args)

  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 1
  v8_log_files = _get_v8_log_files(results_dir)
  assert len(v8_log_files) > 1
123
124
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
def test_speedometer_2_1_custom_chrome_download(output_dir, cache_dir) -> None:
  """Covers:
  - Custom chrome version downloads
  - headless
  """
  if not plt.PLATFORM.which("gsutil"):
    pytest.skip("Missing required 'gsutil', skipping test.")
  results_dir = output_dir / "results"
  # TODO: speed up --browser=chrome-M111 and add it.
  assert len(list(cache_dir.iterdir())) == 0
  # Fix: --cache-dir was previously passed twice with the same value;
  # pass it exactly once.
  _run_cli("sp2.1", f"--cache-dir={cache_dir}", "--browser=chrome-M113",
           "--browser=chrome-111.0.5563.110", "--headless", "--iterations=1",
           "--env-validation=skip", f"--out-dir={results_dir}",
           "--stories=.*Vanilla.*")

  # One result dir per requested browser version.
  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 2
  # No V8 probes requested, so no v8 logs should be produced.
  v8_log_files = _get_v8_log_files(results_dir)
  assert not v8_log_files
146
147
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_speedometer_2_1_chrome_safari(output_dir, cache_dir,
                                       driver_path) -> None:
  """Covers:
  - Speedometer 2.1 on both Chrome and Safari
  - Merging stories over multiple iterations and browsers
  - Testing safari
  - --verbose flag
  - no splashscreen
  """
  # This fails on the CQ bot, so make sure we skip it there:
  if driver_path:
    pytest.skip("Skipping test on CQ.")
  platform = plt.PLATFORM
  # NOTE(review): this only skips when the host is neither macOS nor has a
  # Safari binary at the default path — confirm `and` (not `or`) is intended.
  if not (platform.is_macos or
          platform.exists(browsers.Safari.default_path(platform))):
    pytest.skip("Test requires Safari, skipping on non macOS devices.")
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "sp2.1",
      "--browser=chrome",
      "--browser=safari",
      "--splashscreen=none",
      "--iterations=1",
      "--repeat=2",
      "--env-validation=skip",
      "--verbose",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--stories=.*React.*",
  ]
  _run_cli(*cli_args)

  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 2
  v8_log_files = _get_v8_log_files(results_dir)
  assert not v8_log_files
180
181
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_jetstream_2_0(output_dir, cache_dir) -> None:
  """Covers:
  - jetstream 2.0
  - merge / run separate stories
  - custom multiple --js-flags
  - custom viewport
  - quiet flag
  """
  with pytest.raises(SysExitException):
    _run_cli("jetstream_2.0", "--help")
  _run_cli("describe", "--json", "benchmark", "jetstream_2.0")
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "jetstream_2.0",
      "--browser=chrome-stable",
      "--separate",
      "--repeat=2",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--viewport=maximised",
      "--stories=.*date-format.*",
      "--quiet",
      "--js-flags=--log,--log-opt,--log-deopt",
      # Everything after "--" is forwarded to the browser.
      "--",
      "--no-sandbox",
  ]
  _run_cli(*cli_args)

  v8_log_files = _get_v8_log_files(results_dir)
  assert len(v8_log_files) > 1
  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 1
210
211
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_jetstream_2_1(output_dir, cache_dir, root_dir) -> None:
  """Covers:
  - jetstream 2.1
  - custom --time-unit
  - explicit single story
  - custom splashscreen
  - custom viewport
  - --probe-config
  """
  with pytest.raises(SysExitException):
    _run_cli("jetstream_2.1", "--help")
  _run_cli("describe", "benchmark", "jetstream_2.1")
  probe_config = root_dir / "config/doc/probe.config.hjson"
  assert probe_config.is_file()
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "jetstream_2.1",
      "--browser=chrome",
      "--env-validation=skip",
      "--splashscreen=http://google.com",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--viewport=900x800",
      "--stories=Box2D",
      "--time-unit=0.9",
      f"--probe-config={probe_config}",
      "--throw",
  ]
  _run_cli(*cli_args)

  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 1
  v8_log_files = _get_v8_log_files(results_dir)
  assert len(v8_log_files) > 1
243
244
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_jetstream_2_2(output_dir, cache_dir, root_dir) -> None:
  """Covers:
  - jetstream 2.2
  - custom --time-unit
  - explicit single story
  - custom splashscreen
  - custom viewport
  - --probe-config
  """
  with pytest.raises(SysExitException):
    _run_cli("jetstream_2.2", "--help")
  _run_cli("describe", "benchmark", "jetstream_2.2")
  probe_config = root_dir / "config/doc/probe.config.hjson"
  assert probe_config.is_file()
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "jetstream_2.2",
      "--browser=chrome",
      "--env-validation=skip",
      "--splashscreen=http://google.com",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--viewport=900x800",
      "--stories=Box2D",
      "--time-unit=0.9",
      f"--probe-config={probe_config}",
      "--throw",
  ]
  _run_cli(*cli_args)

  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 1
  v8_log_files = _get_v8_log_files(results_dir)
  assert len(v8_log_files) > 1
276
277
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_loading(output_dir, cache_dir) -> None:
  """Covers:
  - loading using named pages with timeouts
  - custom cooldown time
  - custom viewport
  - performance.mark probe
  """
  with pytest.raises(SysExitException):
    _run_cli("loading", "--help")
  _run_cli("describe", "benchmark", "loading")
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "loading",
      "--browser=chr",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--viewport=headless",
      "--stories=cnn",
      "--cool-down-time=2.5",
      "--probe=performance.entries",
  ]
  _run_cli(*cli_args)

  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 1
302
303
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
def test_loading_page_config(output_dir, cache_dir, root_dir) -> None:
  """Covers the loading benchmark driven by a --page-config file."""
  page_config = root_dir / "config/doc/page.config.hjson"
  assert page_config.is_file()
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "loading",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      f"--page-config={page_config}",
      "--probe=performance.entries",
      "--no-splash",
      "--cool-down-time=0",
      "--throw",
  ]
  _run_cli(*cli_args)
320
321
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_loading_playback_urls(output_dir, cache_dir) -> None:
  """Covers:
  - loading using url
  - combined pages and --playback controller
  """
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "loading",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--playback=5.3s",
      "--viewport=fullscreen",
      "--stories=http://google.com,0.5,http://bing.com,0.4",
      "--probe=performance.entries",
  ]
  _run_cli(*cli_args)
340
341
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_loading_playback(output_dir, cache_dir) -> None:
  """Covers:
  - loading using named pages with timeouts
  - separate pages and --playback controller
  - viewport-size via chrome flag
  """
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "loading",
      "--browser=chr",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--playback=5.3s",
      "--separate",
      "--stories=twitter,2,facebook,0.4",
      "--probe=performance.entries",
      # Everything after "--" is forwarded to the browser.
      "--",
      "--window-size=900,500",
      "--window-position=150,150",
  ]
  _run_cli(*cli_args)
360
361
@pytest.mark.skipif(
    not plt.PLATFORM.has_display, reason="end2end test cannot run headless")
@pytest.mark.skipif(
    plt.PLATFORM.is_linux, reason="Tests temporarily skipped on linux")
@pytest.mark.skipif(
    plt.PLATFORM.is_win, reason="Tests temporarily skipped on windows")
@pytest.mark.xdist_group("end2end-benchmark")
def test_loading_playback_firefox(output_dir, cache_dir) -> None:
  """Covers:
  - loading using named pages with timeouts
  - --playback controller
  - Firefox
  """
  platform = plt.PLATFORM
  # Probing the default Firefox path may itself fail; treat any failure as
  # "Firefox not available" and skip.
  try:
    firefox_missing = not platform.exists(
        browsers.Firefox.default_path(platform))
  except Exception:  # pylint: disable=broad-exception-caught
    firefox_missing = True
  if firefox_missing:
    pytest.skip("Test requires Firefox.")
  results_dir = output_dir / "results"
  assert not results_dir.exists()
  cli_args = [
      "loading",
      "--browser=chr",
      "--browser=ff",
      "--env-validation=skip",
      f"--out-dir={results_dir}",
      f"--cache-dir={cache_dir}",
      "--playback=2x",
      "--stories=twitter,1,facebook,0.4",
      "--probe=performance.entries",
  ]
  _run_cli(*cli_args)

  browser_dirs = _get_browser_dirs(results_dir)
  assert len(browser_dirs) == 2
388
389
# Allow running this end2end test suite directly as a script.
if __name__ == "__main__":
  test_helper.run_pytest(__file__)
392