• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2# -*- coding: utf-8 -*-
3#
4# Copyright 2016 The Chromium OS Authors. All rights reserved.
5# Use of this source code is governed by a BSD-style license that can be
6# found in the LICENSE file.
7
8"""Unittest for the results reporter."""
9
10from __future__ import division
11from __future__ import print_function
12
import collections
import io
import itertools
import os
import unittest
import unittest.mock as mock
19import test_flag
20
21from benchmark_run import MockBenchmarkRun
22from cros_utils import logger
23from experiment_factory import ExperimentFactory
24from experiment_file import ExperimentFile
25from machine_manager import MockCrosMachine
26from machine_manager import MockMachineManager
27from results_cache import MockResult
28from results_report import BenchmarkResults
29from results_report import HTMLResultsReport
30from results_report import JSONResultsReport
31from results_report import ParseChromeosImage
32from results_report import ParseStandardPerfReport
33from results_report import TextResultsReport
34
35
class FreeFunctionsTest(unittest.TestCase):
  """Tests for any free functions in results_report."""

  def testParseChromeosImage(self):
    # N.B. the cases with blank versions aren't explicitly supported by
    # ParseChromeosImage. I'm not sure if they need to be supported, but the
    # goal of this was to capture existing functionality as much as possible.
    chroot_image = ('/my/chroot/src/build/images/x86-generic/'
                    'R01-1.0.date-time/chromiumos_test_image.bin')
    self.assertEqual(
        ParseChromeosImage(chroot_image), ('R01-1.0', chroot_image))

    chroot_dir = os.path.dirname(chroot_image)
    self.assertEqual(ParseChromeosImage(chroot_dir), ('', chroot_dir))

    buildbot_image = ('/my/chroot/chroot/tmp/buildbot-build/'
                      'R02-1.0.date-time/chromiumos_test_image.bin')
    expected_img = buildbot_image.split('/chroot/tmp')[1]

    self.assertEqual(
        ParseChromeosImage(buildbot_image), ('R02-1.0', expected_img))
    self.assertEqual(
        ParseChromeosImage(os.path.dirname(buildbot_image)),
        ('', os.path.dirname(expected_img)))

    # Ensure we do something reasonable when giving paths that don't quite
    # match the expected pattern.
    for odd_path in ('/chromiumos_test_image.bin',
                     'chromiumos_test_image.bin'):
      self.assertEqual(ParseChromeosImage(odd_path), ('', odd_path))
68
# A module-level counter so each FakePath() call yields a distinct path.
# itertools.count avoids both `global` and the mutable-list workaround that
# the linter used to complain about.
_fake_path_counter = itertools.count(1)


def FakePath(ext):
  """Makes a unique path that shouldn't exist on the host system.

  Each call returns a different path, so if said path finds its way into an
  error message, it may be easier to track it to its source.

  Args:
    ext: the trailing path component(s) to append to the unique prefix.

  Returns:
    A path of the form /tmp/should/not/exist/<n>/<ext>, with <n> unique
    per call.
  """
  prefix = '/tmp/should/not/exist/%d/' % (next(_fake_path_counter),)
  return os.path.join(prefix, ext)
83
84
def MakeMockExperiment(compiler='gcc'):
  """Mocks an experiment using the given compiler."""
  experiment_source = io.StringIO("""
      board: x86-alex
      remote: 127.0.0.1
      locks_dir: /tmp
      perf_args: record -a -e cycles
      benchmark: PageCycler {
        iterations: 3
      }

      image1 {
        chromeos_image: %s
      }

      image2 {
        remote: 127.0.0.2
        chromeos_image: %s
      }
      """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
  parsed_file = ExperimentFile(experiment_source)
  factory = ExperimentFactory()
  experiment = factory.GetExperiment(parsed_file,
                                     FakePath('working_directory'),
                                     FakePath('log_dir'))
  # Force every label onto the requested compiler.
  for label in experiment.labels:
    label.compiler = compiler
  return experiment
112
113
def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0):
  """Injects successful experiment runs (for each label) into the experiment."""
  # Defensive copy of keyvals, so if it's modified, we'll know.
  keyvals = dict(keyvals)
  num_configs = len(experiment.benchmarks) * len(experiment.labels)
  num_runs = len(experiment.benchmark_runs) // num_configs

  # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
  # benchmark_run_unittest)
  benchmark = experiment.benchmarks[for_benchmark]
  cache_conditions = []
  log_level = 'average'
  share_cache = ''
  locks_dir = ''
  log = logger.GetLogger()
  manager = MockMachineManager(
      FakePath('chromeos_root'), 0, log_level, locks_dir)
  manager.AddMachine('testing_machine')
  machine = next(
      m for m in manager.GetMachines() if m.name == 'testing_machine')

  def _SuccessfulRun(index, label):
    # Run numbers continue after any pre-existing runs for this config.
    run = MockBenchmarkRun('mock_success%d' % (index,), benchmark, label,
                           1 + index + num_runs, cache_conditions, manager,
                           log, log_level, share_cache, {})
    result = MockResult(log, label, log_level, machine)
    result.keyvals = keyvals
    run.result = result
    return run

  for label in experiment.labels:
    experiment.benchmark_runs.extend(
        _SuccessfulRun(i, label) for i in range(how_many))
  return experiment
148
149
class TextResultsReportTest(unittest.TestCase):
  """Tests that the output of a text report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  def _checkReport(self, mock_getcooldown, email):
    """Builds a text report and verifies the expected pieces are present.

    Args:
      mock_getcooldown: mock standing in for GetTotalWaitCooldownTime.
      email: whether to render the email flavor of the report.

    Returns:
      The generated report text.
    """
    num_success = 2
    success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    SECONDS_IN_MIN = 60
    mock_getcooldown.return_value = {
        experiment.remote[0]: 12 * SECONDS_IN_MIN,
        experiment.remote[1]: 8 * SECONDS_IN_MIN
    }

    text_report = TextResultsReport.FromExperiment(
        experiment, email=email).GetReport()
    self.assertIn(str(success_keyvals['a_float']), text_report)
    self.assertIn(success_keyvals['machine'], text_report)
    self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
    self.assertIn('\nDuration\n', text_report)
    self.assertIn('Total experiment time:\n', text_report)
    self.assertIn('Cooldown wait time:\n', text_report)
    # Cooldown times should be reported in minutes per DUT.
    self.assertIn('DUT %s: %d min' % (experiment.remote[0], 12), text_report)
    self.assertIn('DUT %s: %d min' % (experiment.remote[1], 8), text_report)
    return text_report

  @mock.patch.object(TextResultsReport, 'GetTotalWaitCooldownTime')
  def testOutput(self, mock_getcooldown):
    email_report = self._checkReport(mock_getcooldown, email=True)
    text_report = self._checkReport(mock_getcooldown, email=False)

    # Ensure that the reports somehow different. Otherwise, having the
    # distinction is useless.
    self.assertNotEqual(email_report, text_report)

  def testGetTotalWaitCooldownTime(self):
    # Renamed from test_get_totalwait_cooldowntime to match the camelCase
    # `testX` naming used by every other test in this file; unittest still
    # discovers it via the `test` prefix.
    experiment = MakeMockExperiment()
    cros_machines = experiment.machine_manager.GetMachines()
    cros_machines[0].AddCooldownWaitTime(120)
    cros_machines[1].AddCooldownWaitTime(240)
    text_results = TextResultsReport.FromExperiment(experiment, email=False)
    total = text_results.GetTotalWaitCooldownTime()
    self.assertEqual(total[experiment.remote[0]], 120)
    self.assertEqual(total[experiment.remote[1]], 240)
198
199
class HTMLResultsReportTest(unittest.TestCase):
  """Tests that the output of a HTML report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  # Named view of the kwargs that HTMLResultsReport passes to the
  # GenerateHTMLPage template function.
  _TestOutput = collections.namedtuple('TestOutput', [
      'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table',
      'experiment_file'
  ])

  @staticmethod
  def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
                     chart_divs, full_table, experiment_file):
    """Renders the raw tables to HTML and packs everything into _TestOutput."""
    # N.B. Currently we don't check chart_js; it's just passed through because
    # cros lint complains otherwise.
    summary_table = print_table(summary_table, 'HTML')
    perf_html = print_table(perf_table, 'HTML')
    full_table = print_table(full_table, 'HTML')
    return HTMLResultsReportTest._TestOutput(
        summary_table=summary_table,
        perf_html=perf_html,
        chart_js=chart_js,
        charts=chart_divs,
        full_table=full_table,
        experiment_file=experiment_file)

  def _GetOutput(self, experiment=None, benchmark_results=None):
    """Builds an HTML report and captures what it fed the page template.

    Args:
      experiment: if not None, report via HTMLResultsReport.FromExperiment.
      benchmark_results: otherwise, report on these directly.

    Returns:
      A _TestOutput holding the rendered report pieces.
    """
    with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
      if experiment is not None:
        HTMLResultsReport.FromExperiment(experiment).GetReport()
      else:
        HTMLResultsReport(benchmark_results).GetReport()
      mod_mock = standin
    self.assertEqual(mod_mock.call_count, 1)
    # call_args[0] is positional args, call_args[1] is kwargs.
    self.assertEqual(mod_mock.call_args[0], tuple())
    fmt_args = mod_mock.call_args[1]
    return self._GetTestOutput(**fmt_args)

  def testNoSuccessOutput(self):
    output = self._GetOutput(MakeMockExperiment())
    self.assertIn('no result', output.summary_table)
    self.assertIn('no result', output.full_table)
    self.assertEqual(output.charts, '')
    self.assertNotEqual(output.experiment_file, '')

  def testSuccessfulOutput(self):
    num_success = 2
    success_keyvals = {'retval': 0, 'a_float': 3.96}
    output = self._GetOutput(
        _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))

    self.assertNotIn('no result', output.summary_table)
    # (A long-commented-out assertion was removed here; it checked
    # success_keyvals['machine'], a key this test never sets.)
    self.assertIn('a_float', output.summary_table)
    self.assertIn(str(success_keyvals['a_float']), output.summary_table)
    self.assertIn('a_float', output.full_table)
    # The _ in a_float is filtered out when we're generating HTML.
    self.assertIn('afloat', output.charts)
    # And make sure we have our experiment file...
    self.assertNotEqual(output.experiment_file, '')

  def testBenchmarkResultFailure(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    # An empty list of iteration keyvals means the benchmark produced nothing.
    benchmark_keyvals = {'bench1': [[]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertIn('no result', output.summary_table)
    self.assertEqual(output.charts, '')
    self.assertEqual(output.experiment_file, '')

  def testBenchmarkResultSuccess(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertNotIn('no result', output.summary_table)
    self.assertIn('bench1', output.summary_table)
    self.assertIn('bench1', output.full_table)
    self.assertNotEqual(output.charts, '')
    # No experiment was involved, so no experiment file should be emitted.
    self.assertEqual(output.experiment_file, '')
287
288
class JSONResultsReportTest(unittest.TestCase):
  """Tests JSONResultsReport."""

  # Keys every JSON report record must carry, regardless of its source.
  REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass')
  # Extra keys expected only when the report was built from an Experiment.
  EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version',
                            'chrome_version', 'compiler')

  @staticmethod
  def _GetRequiredKeys(is_experiment):
    """Returns the tuple of keys required in each report record."""
    required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
    if is_experiment:
      # Tuple concatenation; the class attribute itself is not mutated.
      required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
    return required_keys

  def _CheckRequiredKeys(self, test_output, is_experiment):
    """Asserts every record in test_output carries all required keys."""
    required_keys = self._GetRequiredKeys(is_experiment)
    for output in test_output:
      for key in required_keys:
        self.assertIn(key, output)

  def testAllFailedJSONReportOutput(self):
    experiment = MakeMockExperiment()
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)
    # Nothing succeeded; we don't send anything more than what's required.
    required_keys = self._GetRequiredKeys(is_experiment=True)
    for result in results:
      self.assertCountEqual(result.keys(), required_keys)

  def testJSONReportOutputWithSuccesses(self):
    success_keyvals = {
        'retval': 0,
        'a_float': '2.3',
        'many_floats': [['1.0', '2.0'], ['3.0']],
        'machine': "i'm a pirate"
    }

    # 2 is arbitrary.
    num_success = 2
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)

    # Each label gets its own copy of the injected successful runs.
    num_passes = num_success * len(experiment.labels)
    non_failures = [r for r in results if r['pass']]
    self.assertEqual(num_passes, len(non_failures))

    # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
    # Note the string keyvals above come back parsed as floats, and only the
    # first sublist of many_floats survives.
    expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
    for pass_ in non_failures:
      self.assertIn('detailed_results', pass_)
      self.assertDictEqual(expected_detailed, pass_['detailed_results'])
      self.assertIn('machine', pass_)
      self.assertEqual(success_keyvals['machine'], pass_['machine'])

  def testFailedJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    # yapf:disable
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
                                      ('bench3', 1), ('bench4', 0)]
    # yapf:enable

    # Each of these is a failure for a different reason: nonzero/negative
    # retval, missing retval, or no results at all.
    benchmark_keyvals = {
        'bench1': [[{
            'retval': 1,
            'foo': 2.0
        }]],
        'bench2': [[{
            'retval': 1,
            'foo': 4.0
        }, {
            'retval': -1,
            'bar': 999
        }]],
        # lack of retval is considered a failure.
        'bench3': [[{}]],
        'bench4': [[]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertFalse(any(r['pass'] for r in results))

  def testJSONGetReportObeysJSONSettings(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    # These can be anything, really. So long as they're distinctive.
    separators = (',\t\n\t', ':\t\n\t')
    benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    # json_args should be forwarded to json.dumps, so our custom separators
    # must appear verbatim in the serialized report.
    reporter = JSONResultsReport(
        bench_results, json_args={'separators': separators})
    result_str = reporter.GetReport()
    self.assertIn(separators[0], result_str)
    self.assertIn(separators[1], result_str)

  def testSuccessfulJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
    benchmark_keyvals = {
        'bench1': [[{
            'retval': 0,
            'foo': 2.0
        }]],
        'bench2': [[{
            'retval': 0,
            'foo': 4.0
        }, {
            'retval': 0,
            'bar': 999
        }]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertTrue(all(r['pass'] for r in results))
    # Enforce that the results have *some* deterministic order.
    # The 5.0 default sorts the bar-only bench2 record (which has no 'foo')
    # after the foo=4.0 one, fixing the unpacking order below.
    keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0))
    sorted_results = sorted(results, key=keyfn)
    detailed_results = [r['detailed_results'] for r in sorted_results]
    bench1, bench2_foo, bench2_bar = detailed_results
    self.assertEqual(bench1['foo'], 2.0)
    self.assertEqual(bench2_foo['foo'], 4.0)
    self.assertEqual(bench2_bar['bar'], 999)
    self.assertNotIn('bar', bench1)
    self.assertNotIn('bar', bench2_foo)
    self.assertNotIn('foo', bench2_bar)
420
421
class PerfReportParserTest(unittest.TestCase):
  """Tests for the perf report parser in results_report."""

  @staticmethod
  def _ReadRealPerfReport():
    """Reads the checked-in real-world `perf report` fixture as text."""
    my_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f:
      return f.read()

  def testParserParsesRealWorldPerfReport(self):
    report = ParseStandardPerfReport(self._ReadRealPerfReport())
    self.assertCountEqual(['cycles', 'instructions'], list(report.keys()))

    # Arbitrarily selected known percentages from the perf report.
    known_cycles_percentages = {
        '0xffffffffa4a1f1c9': 0.66,
        '0x0000115bb7ba9b54': 0.47,
        '0x0000000000082e08': 0.00,
        '0xffffffffa4a13e63': 0.00,
    }
    report_cycles = report['cycles']
    self.assertEqual(len(report_cycles), 214)
    for k, v in known_cycles_percentages.items():
      self.assertIn(k, report_cycles)
      self.assertEqual(v, report_cycles[k])

    # Fixed local-variable typo: "instrunctions" -> "instructions".
    known_instructions_percentages = {
        '0x0000115bb6c35d7a': 1.65,
        '0x0000115bb7ba9b54': 0.67,
        '0x0000000000024f56': 0.00,
        '0xffffffffa4a0ee03': 0.00,
    }
    report_instructions = report['instructions']
    self.assertEqual(len(report_instructions), 492)
    for k, v in known_instructions_percentages.items():
      self.assertIn(k, report_instructions)
      self.assertEqual(v, report_instructions[k])
459
460
if __name__ == '__main__':
  # Put the suite into test mode before running — presumably so the mock
  # machine/experiment layers avoid touching real hardware (see test_flag).
  test_flag.SetTestMode(True)
  unittest.main()
464