#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unittest for the results reporter."""

from __future__ import division
from __future__ import print_function

import collections
import io
import os
import unittest
import unittest.mock as mock

import test_flag

from benchmark_run import MockBenchmarkRun
from cros_utils import logger
from experiment_factory import ExperimentFactory
from experiment_file import ExperimentFile
from machine_manager import MockCrosMachine
from machine_manager import MockMachineManager
from results_cache import MockResult
from results_report import BenchmarkResults
from results_report import HTMLResultsReport
from results_report import JSONResultsReport
from results_report import ParseChromeosImage
from results_report import ParseStandardPerfReport
from results_report import TextResultsReport


class FreeFunctionsTest(unittest.TestCase):
  """Tests for any free functions in results_report."""

  def testParseChromeosImage(self):
    # N.B. the cases with blank versions aren't explicitly supported by
    # ParseChromeosImage. I'm not sure if they need to be supported, but the
    # goal of this was to capture existing functionality as much as possible.
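    # (Inference from the assertions below, not documented behavior:
    # ParseChromeosImage seems to return the 'R##-x.y' version parsed from
    # the image's directory name, plus the image path, with everything up
    # through '/chroot/tmp' stripped for buildbot-style paths.)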
    base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \
        '/chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case))

    dir_base_case = os.path.dirname(base_case)
    self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case))

    buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \
        '/chromiumos_test_image.bin'
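    # Everything after the '/chroot/tmp' prefix is what the parser is
    # expected to hand back for buildbot-style paths.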
    buildbot_img = buildbot_case.split('/chroot/tmp')[1]

    self.assertEqual(
        ParseChromeosImage(buildbot_case), ('R02-1.0', buildbot_img))
    self.assertEqual(
        ParseChromeosImage(os.path.dirname(buildbot_case)),
        ('', os.path.dirname(buildbot_img)))

    # Ensure we don't act completely insanely given a few mildly insane paths.
    fun_case = '/chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case))

    fun_case2 = 'chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2))


# There are many ways for this to be done better, but the linter complains
# about all of them (that I can think of, at least).
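# (A hedged aside: a module-level itertools.count(1) consumed with next()
# would avoid the mutable-list trick, assuming the linter tolerates it.)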
_fake_path_number = [0]


def FakePath(ext):
  """Makes a unique path that shouldn't exist on the host system.

  Each call returns a different path, so if said path finds its way into an
  error message, it may be easier to track it to its source.
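  Illustrative only (the exact number depends on how many calls preceded):
    FakePath('image.bin')  # -> e.g. '/tmp/should/not/exist/1/image.bin'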
  """
  _fake_path_number[0] += 1
  prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0],)
  return os.path.join(prefix, ext)


def MakeMockExperiment(compiler='gcc'):
  """Mocks an experiment using the given compiler."""
  mock_experiment_file = io.StringIO("""
      board: x86-alex
      remote: 127.0.0.1
      locks_dir: /tmp
      perf_args: record -a -e cycles
      benchmark: PageCycler {
        iterations: 3
      }

      image1 {
        chromeos_image: %s
      }

      image2 {
        remote: 127.0.0.2
        chromeos_image: %s
      }
      """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
  efile = ExperimentFile(mock_experiment_file)
  experiment = ExperimentFactory().GetExperiment(efile,
                                                 FakePath('working_directory'),
                                                 FakePath('log_dir'))
  for label in experiment.labels:
    label.compiler = compiler
  return experiment


def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0):
  """Injects successful experiment runs (for each label) into the experiment."""
  # Defensive copy of keyvals, so if it's modified, we'll know.
  keyvals = dict(keyvals)
  num_configs = len(experiment.benchmarks) * len(experiment.labels)
  num_runs = len(experiment.benchmark_runs) // num_configs

  # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
  # benchmark_run_unittest)
  bench = experiment.benchmarks[for_benchmark]
  cache_conditions = []
  log_level = 'average'
  share_cache = ''
  locks_dir = ''
  log = logger.GetLogger()
  machine_manager = MockMachineManager(
      FakePath('chromeos_root'), 0, log_level, locks_dir)
  machine_manager.AddMachine('testing_machine')
  machine = next(
      m for m in machine_manager.GetMachines() if m.name == 'testing_machine')

  def MakeSuccessfulRun(n, label):
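    # The iteration argument (1 + n + num_runs) presumably numbers injected
    # runs after the ones the experiment already has, to avoid collisions.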
    run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
                           1 + n + num_runs, cache_conditions, machine_manager,
                           log, log_level, share_cache, {})
    mock_result = MockResult(log, label, log_level, machine)
    mock_result.keyvals = keyvals
    run.result = mock_result
    return run

  for label in experiment.labels:
    experiment.benchmark_runs.extend(
        MakeSuccessfulRun(n, label) for n in range(how_many))
  return experiment


class TextResultsReportTest(unittest.TestCase):
  """Tests that the output of a text report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  def _checkReport(self, mock_getcooldown, email):
    num_success = 2
    success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    SECONDS_IN_MIN = 60
    mock_getcooldown.return_value = {
        experiment.remote[0]: 12 * SECONDS_IN_MIN,
        experiment.remote[1]: 8 * SECONDS_IN_MIN
    }

    text_report = TextResultsReport.FromExperiment(
        experiment, email=email).GetReport()
    self.assertIn(str(success_keyvals['a_float']), text_report)
    self.assertIn(success_keyvals['machine'], text_report)
    self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
    self.assertIn('\nDuration\n', text_report)
    self.assertIn('Total experiment time:\n', text_report)
    self.assertIn('Cooldown wait time:\n', text_report)
    self.assertIn('DUT %s: %d min' % (experiment.remote[0], 12), text_report)
    self.assertIn('DUT %s: %d min' % (experiment.remote[1], 8), text_report)
    return text_report

  @mock.patch.object(TextResultsReport, 'GetTotalWaitCooldownTime')
  def testOutput(self, mock_getcooldown):
    email_report = self._checkReport(mock_getcooldown, email=True)
    text_report = self._checkReport(mock_getcooldown, email=False)

    # Ensure that the reports are somehow different. Otherwise, having the
    # distinction is useless.
    self.assertNotEqual(email_report, text_report)

  def test_get_totalwait_cooldowntime(self):
    experiment = MakeMockExperiment()
    cros_machines = experiment.machine_manager.GetMachines()
    cros_machines[0].AddCooldownWaitTime(120)
    cros_machines[1].AddCooldownWaitTime(240)
    text_results = TextResultsReport.FromExperiment(experiment, email=False)
    total = text_results.GetTotalWaitCooldownTime()
    self.assertEqual(total[experiment.remote[0]], 120)
    self.assertEqual(total[experiment.remote[1]], 240)


class HTMLResultsReportTest(unittest.TestCase):
  """Tests that the output of an HTML report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  _TestOutput = collections.namedtuple('TestOutput', [
      'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table',
      'experiment_file'
  ])

  @staticmethod
  def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
                     chart_divs, full_table, experiment_file):
    # N.B. Currently we don't check chart_js; it's just passed through because
    # cros lint complains otherwise.
    summary_table = print_table(summary_table, 'HTML')
    perf_html = print_table(perf_table, 'HTML')
    full_table = print_table(full_table, 'HTML')
    return HTMLResultsReportTest._TestOutput(
        summary_table=summary_table,
        perf_html=perf_html,
        chart_js=chart_js,
        charts=chart_divs,
        full_table=full_table,
        experiment_file=experiment_file)

  def _GetOutput(self, experiment=None, benchmark_results=None):
    with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
      if experiment is not None:
        HTMLResultsReport.FromExperiment(experiment).GetReport()
      else:
        HTMLResultsReport(benchmark_results).GetReport()
      mod_mock = standin
    self.assertEqual(mod_mock.call_count, 1)
    # call_args[0] is positional args, call_args[1] is kwargs.
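    # e.g. after standin(1, x=2): call_args[0] == (1,) and
    # call_args[1] == {'x': 2}; we expect GenerateHTMLPage to be called
    # with keyword arguments only.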
    self.assertEqual(mod_mock.call_args[0], tuple())
    fmt_args = mod_mock.call_args[1]
    return self._GetTestOutput(**fmt_args)

  def testNoSuccessOutput(self):
    output = self._GetOutput(MakeMockExperiment())
    self.assertIn('no result', output.summary_table)
    self.assertIn('no result', output.full_table)
    self.assertEqual(output.charts, '')
    self.assertNotEqual(output.experiment_file, '')

  def testSuccessfulOutput(self):
    num_success = 2
    success_keyvals = {'retval': 0, 'a_float': 3.96}
    output = self._GetOutput(
        _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))

    self.assertNotIn('no result', output.summary_table)
    # self.assertIn(success_keyvals['machine'], output.summary_table)
    self.assertIn('a_float', output.summary_table)
    self.assertIn(str(success_keyvals['a_float']), output.summary_table)
    self.assertIn('a_float', output.full_table)
    # The _ in a_float is filtered out when we're generating HTML.
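    # (An inference from the assertion below; presumably the chart machinery
    # needs bare identifiers, so 'a_float' shows up as 'afloat'.)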
    self.assertIn('afloat', output.charts)
    # And make sure we have our experiment file...
    self.assertNotEqual(output.experiment_file, '')

  def testBenchmarkResultFailure(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    benchmark_keyvals = {'bench1': [[]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertIn('no result', output.summary_table)
    self.assertEqual(output.charts, '')
    self.assertEqual(output.experiment_file, '')

  def testBenchmarkResultSuccess(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertNotIn('no result', output.summary_table)
    self.assertIn('bench1', output.summary_table)
    self.assertIn('bench1', output.full_table)
    self.assertNotEqual(output.charts, '')
    self.assertEqual(output.experiment_file, '')


class JSONResultsReportTest(unittest.TestCase):
  """Tests JSONResultsReport."""

  REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass')
  EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version',
                            'chrome_version', 'compiler')

  @staticmethod
  def _GetRequiredKeys(is_experiment):
    required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
    if is_experiment:
      required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
    return required_keys

  def _CheckRequiredKeys(self, test_output, is_experiment):
    required_keys = self._GetRequiredKeys(is_experiment)
    for output in test_output:
      for key in required_keys:
        self.assertIn(key, output)

  def testAllFailedJSONReportOutput(self):
    experiment = MakeMockExperiment()
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)
    # Nothing succeeded; we don't send anything more than what's required.
    required_keys = self._GetRequiredKeys(is_experiment=True)
    for result in results:
      self.assertCountEqual(result.keys(), required_keys)

  def testJSONReportOutputWithSuccesses(self):
    success_keyvals = {
        'retval': 0,
        'a_float': '2.3',
        'many_floats': [['1.0', '2.0'], ['3.0']],
        'machine': "i'm a pirate"
    }

    # 2 is arbitrary.
    num_success = 2
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)

    num_passes = num_success * len(experiment.labels)
    non_failures = [r for r in results if r['pass']]
    self.assertEqual(num_passes, len(non_failures))

    # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
    expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
    for pass_ in non_failures:
      self.assertIn('detailed_results', pass_)
      self.assertDictEqual(expected_detailed, pass_['detailed_results'])
      self.assertIn('machine', pass_)
      self.assertEqual(success_keyvals['machine'], pass_['machine'])

  def testFailedJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    # yapf:disable
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
                                      ('bench3', 1), ('bench4', 0)]
    # yapf:enable

    benchmark_keyvals = {
        'bench1': [[{
            'retval': 1,
            'foo': 2.0
        }]],
        'bench2': [[{
            'retval': 1,
            'foo': 4.0
        }, {
            'retval': -1,
            'bar': 999
        }]],
        # lack of retval is considered a failure.
        'bench3': [[{}]],
        'bench4': [[]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertFalse(any(r['pass'] for r in results))

  def testJSONGetReportObeysJSONSettings(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    # These can be anything, really. So long as they're distinctive.
    separators = (',\t\n\t', ':\t\n\t')
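    # These follow json.dumps' separators=(item_sep, key_sep) convention;
    # assuming json_args is forwarded to json.dumps, both strings should
    # appear verbatim in the serialized report.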
    benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    reporter = JSONResultsReport(
        bench_results, json_args={'separators': separators})
    result_str = reporter.GetReport()
    self.assertIn(separators[0], result_str)
    self.assertIn(separators[1], result_str)

  def testSuccessfulJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
    benchmark_keyvals = {
        'bench1': [[{
            'retval': 0,
            'foo': 2.0
        }]],
        'bench2': [[{
            'retval': 0,
            'foo': 4.0
        }, {
            'retval': 0,
            'bar': 999
        }]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertTrue(all(r['pass'] for r in results))
    # Enforce that the results have *some* deterministic order.
    keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0))
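    # The 5.0 default places bench2's 'bar'-only result after its foo=4.0
    # result, so the unpacking below is deterministic.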
    sorted_results = sorted(results, key=keyfn)
    detailed_results = [r['detailed_results'] for r in sorted_results]
    bench1, bench2_foo, bench2_bar = detailed_results
    self.assertEqual(bench1['foo'], 2.0)
    self.assertEqual(bench2_foo['foo'], 4.0)
    self.assertEqual(bench2_bar['bar'], 999)
    self.assertNotIn('bar', bench1)
    self.assertNotIn('bar', bench2_foo)
    self.assertNotIn('foo', bench2_bar)


class PerfReportParserTest(unittest.TestCase):
  """Tests for the perf report parser in results_report."""

  @staticmethod
  def _ReadRealPerfReport():
    my_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f:
      return f.read()

  def testParserParsesRealWorldPerfReport(self):
    report = ParseStandardPerfReport(self._ReadRealPerfReport())
    self.assertCountEqual(['cycles', 'instructions'], list(report.keys()))

    # Arbitrarily selected known percentages from the perf report.
    known_cycles_percentages = {
        '0xffffffffa4a1f1c9': 0.66,
        '0x0000115bb7ba9b54': 0.47,
        '0x0000000000082e08': 0.00,
        '0xffffffffa4a13e63': 0.00,
    }
    report_cycles = report['cycles']
    self.assertEqual(len(report_cycles), 214)
    for k, v in known_cycles_percentages.items():
      self.assertIn(k, report_cycles)
      self.assertEqual(v, report_cycles[k])

    known_instructions_percentages = {
        '0x0000115bb6c35d7a': 1.65,
        '0x0000115bb7ba9b54': 0.67,
        '0x0000000000024f56': 0.00,
        '0xffffffffa4a0ee03': 0.00,
    }
    report_instructions = report['instructions']
    self.assertEqual(len(report_instructions), 492)
    for k, v in known_instructions_percentages.items():
      self.assertIn(k, report_instructions)
      self.assertEqual(v, report_instructions[k])


if __name__ == '__main__':
  test_flag.SetTestMode(True)
  unittest.main()