#!/usr/bin/env python2
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for the results reporter."""

from __future__ import division
from __future__ import print_function

from StringIO import StringIO

import collections
import mock
import os
import test_flag
import unittest

from benchmark_run import MockBenchmarkRun
from cros_utils import logger
from experiment_factory import ExperimentFactory
from experiment_file import ExperimentFile
from machine_manager import MockCrosMachine
from machine_manager import MockMachineManager
from results_cache import MockResult
from results_report import BenchmarkResults
from results_report import HTMLResultsReport
from results_report import JSONResultsReport
from results_report import ParseChromeosImage
from results_report import ParseStandardPerfReport
from results_report import TextResultsReport


class FreeFunctionsTest(unittest.TestCase):
  """Tests for any free functions in results_report."""

  def testParseChromeosImage(self):
    # N.B. the cases with blank versions aren't explicitly supported by
    # ParseChromeosImage. I'm not sure if they need to be supported, but the
    # goal of this was to capture existing functionality as much as possible.
    base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \
        '/chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case))

    dir_base_case = os.path.dirname(base_case)
    self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case))

    buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \
        '/chromiumos_test_image.bin'
    buildbot_img = buildbot_case.split('/chroot/tmp')[1]

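    # For buildbot-style images, the version should be parsed out and the
    # image path should be reported as seen from inside the chroot (i.e.
    # everything after '/chroot/tmp').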
    self.assertEqual(
        ParseChromeosImage(buildbot_case), ('R02-1.0', buildbot_img))
    self.assertEqual(
        ParseChromeosImage(os.path.dirname(buildbot_case)),
        ('', os.path.dirname(buildbot_img)))

    # Ensure we don't act completely insanely given a few mildly insane paths.
    fun_case = '/chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case))

    fun_case2 = 'chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2))


# There are many ways for this to be done better, but the linter complains
# about all of them (that I can think of, at least).
_fake_path_number = [0]


def FakePath(ext):
  """Makes a unique path that shouldn't exist on the host system.

  Each call returns a different path, so if said path finds its way into an
  error message, it may be easier to track it to its source.
  """
  _fake_path_number[0] += 1
  prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0],)
  return os.path.join(prefix, ext)


def MakeMockExperiment(compiler='gcc'):
  """Mocks an experiment using the given compiler."""
  mock_experiment_file = StringIO("""
      board: x86-alex
      remote: 127.0.0.1
      perf_args: record -a -e cycles
      benchmark: PageCycler {
        iterations: 3
      }

      image1 {
        chromeos_image: %s
      }

      image2 {
        remote: 127.0.0.2
        chromeos_image: %s
      }
      """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
  efile = ExperimentFile(mock_experiment_file)
  experiment = ExperimentFactory().GetExperiment(efile,
                                                 FakePath('working_directory'),
                                                 FakePath('log_dir'))
  for label in experiment.labels:
    label.compiler = compiler
  return experiment


def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
                     label=None):
  """Injects successful experiment runs (for each label) into the experiment."""
  # Defensive copy of keyvals, so if it's modified, we'll know.
  keyvals = dict(keyvals)
  num_configs = len(experiment.benchmarks) * len(experiment.labels)
  num_runs = len(experiment.benchmark_runs) // num_configs
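  # Runs are numbered per (benchmark, label) config; the runs injected below
  # continue the numbering after the num_runs iterations that already exist.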

  # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
  # benchmark_run_unittest)
  bench = experiment.benchmarks[for_benchmark]
  cache_conditions = []
  log_level = 'average'
  share_cache = ''
  locks_dir = ''
  log = logger.GetLogger()
  machine_manager = MockMachineManager(
      FakePath('chromeos_root'), 0, log_level, locks_dir)
  machine_manager.AddMachine('testing_machine')
  machine = next(m for m in machine_manager.GetMachines()
                 if m.name == 'testing_machine')
  for label in experiment.labels:

    def MakeSuccessfulRun(n):
      run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
                             1 + n + num_runs, cache_conditions,
                             machine_manager, log, log_level, share_cache)
      mock_result = MockResult(log, label, log_level, machine)
      mock_result.keyvals = keyvals
      run.result = mock_result
      return run

    experiment.benchmark_runs.extend(
        MakeSuccessfulRun(n) for n in xrange(how_many))
  return experiment


class TextResultsReportTest(unittest.TestCase):
  """Tests that the output of a text report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  def _checkReport(self, email):
    num_success = 2
    success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    text_report = TextResultsReport.FromExperiment(experiment, email=email) \
                                   .GetReport()
    self.assertIn(str(success_keyvals['a_float']), text_report)
    self.assertIn(success_keyvals['machine'], text_report)
    self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
    return text_report

  def testOutput(self):
    email_report = self._checkReport(email=True)
    text_report = self._checkReport(email=False)

    # Ensure that the reports are somehow different. Otherwise, having the
    # distinction is useless.
    self.assertNotEqual(email_report, text_report)


class HTMLResultsReportTest(unittest.TestCase):
176  """Tests that the output of a HTML report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  _TestOutput = collections.namedtuple('TestOutput', [
      'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table',
      'experiment_file'
  ])
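  # _TestOutput mirrors the keyword arguments that HTMLResultsReport hands to
  # the (mocked) results_report_templates.GenerateHTMLPage; _GetOutput below
  # captures those kwargs and repackages them, with tables rendered to HTML.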

  @staticmethod
  def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
                     chart_divs, full_table, experiment_file):
    # N.B. Currently we don't check chart_js; it's just passed through because
    # cros lint complains otherwise.
    summary_table = print_table(summary_table, 'HTML')
    perf_html = print_table(perf_table, 'HTML')
    full_table = print_table(full_table, 'HTML')
    return HTMLResultsReportTest._TestOutput(
        summary_table=summary_table,
        perf_html=perf_html,
        chart_js=chart_js,
        charts=chart_divs,
        full_table=full_table,
        experiment_file=experiment_file)

  def _GetOutput(self, experiment=None, benchmark_results=None):
    with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
      if experiment is not None:
        HTMLResultsReport.FromExperiment(experiment).GetReport()
      else:
        HTMLResultsReport(benchmark_results).GetReport()
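      # Stash the mock so its single recorded call can be inspected below.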
      mod_mock = standin
    self.assertEquals(mod_mock.call_count, 1)
    # call_args[0] is positional args, call_args[1] is kwargs.
    self.assertEquals(mod_mock.call_args[0], tuple())
    fmt_args = mod_mock.call_args[1]
    return self._GetTestOutput(**fmt_args)

  def testNoSuccessOutput(self):
    output = self._GetOutput(MakeMockExperiment())
    self.assertIn('no result', output.summary_table)
    self.assertIn('no result', output.full_table)
    self.assertEqual(output.charts, '')
    self.assertNotEqual(output.experiment_file, '')

  def testSuccessfulOutput(self):
    num_success = 2
    success_keyvals = {'retval': 0, 'a_float': 3.96}
    output = self._GetOutput(
        _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))

    self.assertNotIn('no result', output.summary_table)
    #self.assertIn(success_keyvals['machine'], output.summary_table)
    self.assertIn('a_float', output.summary_table)
    self.assertIn(str(success_keyvals['a_float']), output.summary_table)
    self.assertIn('a_float', output.full_table)
    # The _ in a_float is filtered out when we're generating HTML.
    self.assertIn('afloat', output.charts)
    # And make sure we have our experiment file...
    self.assertNotEqual(output.experiment_file, '')

  def testBenchmarkResultFailure(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    benchmark_keyvals = {'bench1': [[]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertIn('no result', output.summary_table)
    self.assertEqual(output.charts, '')
    self.assertEqual(output.experiment_file, '')

  def testBenchmarkResultSuccess(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertNotIn('no result', output.summary_table)
    self.assertIn('bench1', output.summary_table)
    self.assertIn('bench1', output.full_table)
    self.assertNotEqual(output.charts, '')
    self.assertEqual(output.experiment_file, '')


class JSONResultsReportTest(unittest.TestCase):
  """Tests JSONResultsReport."""

  REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass')
  EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version',
                            'chrome_version', 'compiler')

  @staticmethod
  def _GetRequiredKeys(is_experiment):
    required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
    if is_experiment:
      required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
    return required_keys

  def _CheckRequiredKeys(self, test_output, is_experiment):
    required_keys = self._GetRequiredKeys(is_experiment)
    for output in test_output:
      for key in required_keys:
        self.assertIn(key, output)

  def testAllFailedJSONReportOutput(self):
    experiment = MakeMockExperiment()
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)
    # Nothing succeeded; we don't send anything more than what's required.
    required_keys = self._GetRequiredKeys(is_experiment=True)
    for result in results:
      self.assertItemsEqual(result.iterkeys(), required_keys)

  def testJSONReportOutputWithSuccesses(self):
    success_keyvals = {
        'retval': 0,
        'a_float': '2.3',
        'many_floats': [['1.0', '2.0'], ['3.0']],
        'machine': "i'm a pirate"
    }

    # 2 is arbitrary.
    num_success = 2
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)

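    # _InjectSuccesses added num_success runs per label, so exactly that many
    # results should be reported as passing.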
    num_passes = num_success * len(experiment.labels)
    non_failures = [r for r in results if r['pass']]
    self.assertEqual(num_passes, len(non_failures))

    # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
    expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
    for pass_ in non_failures:
      self.assertIn('detailed_results', pass_)
      self.assertDictEqual(expected_detailed, pass_['detailed_results'])
      self.assertIn('machine', pass_)
      self.assertEqual(success_keyvals['machine'], pass_['machine'])

  def testFailedJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
                                      ('bench3', 1), ('bench4', 0)]
    benchmark_keyvals = {
        'bench1': [[{
            'retval': 1,
            'foo': 2.0
        }]],
        'bench2': [[{
            'retval': 1,
            'foo': 4.0
        }, {
            'retval': -1,
            'bar': 999
        }]],
        # lack of retval is considered a failure.
        'bench3': [[{}]],
        'bench4': [[]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertFalse(any(r['pass'] for r in results))

  def testJSONGetReportObeysJSONSettings(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    # These can be anything, really. So long as they're distinctive.
    separators = (',\t\n\t', ':\t\n\t')
    benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    reporter = JSONResultsReport(
        bench_results, json_args={'separators': separators})
    result_str = reporter.GetReport()
    self.assertIn(separators[0], result_str)
    self.assertIn(separators[1], result_str)

  def testSuccessfulJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
    benchmark_keyvals = {
        'bench1': [[{
            'retval': 0,
            'foo': 2.0
        }]],
        'bench2': [[{
            'retval': 0,
            'foo': 4.0
        }, {
            'retval': 0,
            'bar': 999
        }]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertTrue(all(r['pass'] for r in results))
    # Enforce that the results have *some* deterministic order.
    keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0))
    sorted_results = sorted(results, key=keyfn)
    detailed_results = [r['detailed_results'] for r in sorted_results]
    bench1, bench2_foo, bench2_bar = detailed_results
    self.assertEqual(bench1['foo'], 2.0)
    self.assertEqual(bench2_foo['foo'], 4.0)
    self.assertEqual(bench2_bar['bar'], 999)
    self.assertNotIn('bar', bench1)
    self.assertNotIn('bar', bench2_foo)
    self.assertNotIn('foo', bench2_bar)


class PerfReportParserTest(unittest.TestCase):
  """Tests for the perf report parser in results_report."""

  @staticmethod
  def _ReadRealPerfReport():
    my_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f:
      return f.read()

  def testParserParsesRealWorldPerfReport(self):
    report = ParseStandardPerfReport(self._ReadRealPerfReport())
    self.assertItemsEqual(['cycles', 'instructions'], report.keys())

    # Arbitrarily selected known percentages from the perf report.
    known_cycles_percentages = {
        '0xffffffffa4a1f1c9': 0.66,
        '0x0000115bb7ba9b54': 0.47,
        '0x0000000000082e08': 0.00,
        '0xffffffffa4a13e63': 0.00,
    }
    report_cycles = report['cycles']
    self.assertEqual(len(report_cycles), 214)
    for k, v in known_cycles_percentages.iteritems():
      self.assertIn(k, report_cycles)
      self.assertEqual(v, report_cycles[k])

    known_instructions_percentages = {
        '0x0000115bb6c35d7a': 1.65,
        '0x0000115bb7ba9b54': 0.67,
        '0x0000000000024f56': 0.00,
        '0xffffffffa4a0ee03': 0.00,
    }
    report_instructions = report['instructions']
    self.assertEqual(len(report_instructions), 492)
    for k, v in known_instructions_percentages.iteritems():
      self.assertIn(k, report_instructions)
      self.assertEqual(v, report_instructions[k])


if __name__ == '__main__':
  test_flag.SetTestMode(True)
  unittest.main()