# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import perf
from autotest_lib.client.cros import service_stopper
from autotest_lib.client.cros.graphics import graphics_utils


class graphics_GLBench(graphics_utils.GraphicsTest):
  """Run glbench, a benchmark that times graphics intensive activities."""
  version = 1
  preserve_srcdir = True
  _services = None

  glbench_directory = '/usr/local/glbench/'
  # Good images.
  reference_images_file = os.path.join(glbench_directory,
                                       'files/glbench_reference_images.txt')
  # Images that are bad but for which the bug has not been fixed yet.
  knownbad_images_file = os.path.join(glbench_directory,
                                      'files/glbench_knownbad_images.txt')
  # Images that are bad and for which a fix has been submitted.
  fixedbad_images_file = os.path.join(glbench_directory,
                                      'files/glbench_fixedbad_images.txt')

  # These tests do not draw anything; they can only be used to check
  # performance.
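  # Entries are matched as test name prefixes (see is_no_checksum_test).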
  no_checksum_tests = set([
      'compositing_no_fill',
      'pixel_read',
      'texture_reuse_luminance_teximage2d',
      'texture_reuse_luminance_texsubimage2d',
      'texture_reuse_rgba_teximage2d',
      'texture_reuse_rgba_texsubimage2d',
      'context_glsimple',
      'swap_glsimple',
  ])

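  # Appended verbatim to the glbench command-line options in run_once();
  # empty by default.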
  blacklist = ''

  unit_higher_is_better = {
      'mbytes_sec': True,
      'mpixels_sec': True,
      'mtexel_sec': True,
      'mtri_sec': True,
      'mvtx_sec': True,
      'us': False,
      '1280x768_fps': True
  }

  def initialize(self):
    super(graphics_GLBench, self).initialize()
    # If UI is running, we must stop it and restore later.
    self._services = service_stopper.ServiceStopper(['ui'])
    self._services.stop_services()

  def cleanup(self):
    if self._services:
      self._services.restore_services()
    super(graphics_GLBench, self).cleanup()

  def is_no_checksum_test(self, testname):
69    """Check if given test requires no screenshot checksum.

    @param testname: name of test to check.
    """
    for prefix in self.no_checksum_tests:
      if testname.startswith(prefix):
        return True
    return False

  def load_imagenames(self, filename):
79    """Loads text file with MD5 file names.

    @param filename: name of file to load.
    """
    imagenames = os.path.join(self.autodir, filename)
    with open(imagenames, 'r') as f:
      imagenames = f.read()
      return imagenames

  @graphics_utils.GraphicsTest.failure_report_decorator('graphics_GLBench')
  def run_once(self, options='', hasty=False):
    """Run the test.

    @param options: String of options to run the glbench executable with.
    @param hasty: Run the test more quickly by running fewer iterations,
        lower resolution, and without waiting for the DUT to cool down.
95    """
96    options += self.blacklist
    # Run the test; saving is optional and helps with debugging
    # and reference image management. If unknown images are
    # encountered, one can take them from the outdir and copy
    # them (after verification) into the reference image directory.
    exefile = os.path.join(self.glbench_directory, 'bin/glbench')
    outdir = self.outputdir
    options += ' -save -outdir=' + outdir
    # Using the -hasty option, we run only a subset of tests without waiting
    # for thermals to normalize. The test should complete in 15-20 seconds.
    if hasty:
      options += ' -hasty'

    cmd = '%s %s' % (exefile, options)
    summary = None
    pc_error_reason = None
    try:
      if hasty:
        # On BVT the test will not monitor thermals, so we will not verify
        # its correct status using PerfControl.
        summary = utils.run(cmd,
                            stderr_is_expected=False,
                            stdout_tee=utils.TEE_TO_LOGS,
                            stderr_tee=utils.TEE_TO_LOGS).stdout
      else:
        utils.report_temperature(self, 'temperature_1_start')
        # Wrap the test run inside a PerfControl instance to make machine
        # behavior more consistent.
        with perf.PerfControl() as pc:
          if not pc.verify_is_valid():
            raise error.TestFail('Failed: %s' % pc.get_error_reason())
          utils.report_temperature(self, 'temperature_2_before_test')

          # Run the test. If it gets the CPU too hot, pc should notice.
          summary = utils.run(cmd,
                              stderr_is_expected=False,
                              stdout_tee=utils.TEE_TO_LOGS,
                              stderr_tee=utils.TEE_TO_LOGS).stdout
          if not pc.verify_is_valid():
            # Defer error handling until after perf report.
            pc_error_reason = pc.get_error_reason()
    except error.CmdError:
      raise error.TestFail('Failed: CmdError running %s' % cmd)
    except error.CmdTimeoutError:
      raise error.TestFail('Failed: CmdTimeout running %s' % cmd)

    # Write a copy of stdout to help debug failures.
    results_path = os.path.join(self.outputdir, 'summary.txt')
    f = open(results_path, 'w+')
    f.write('# ---------------------------------------------------\n')
    f.write('# [' + cmd + ']\n')
    f.write(summary)
    f.write('\n# -------------------------------------------------\n')
    f.write('# [graphics_GLBench.py postprocessing]\n')

    # Analyze the output. Sample:
    ## board_id: NVIDIA Corporation - Quadro FX 380/PCI/SSE2
    ## Running: ../glbench -save -outdir=img
    #swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]
    results = summary.splitlines()
    if not results:
      f.close()
      raise error.TestFail('Failed: No output from test. Check /tmp/' +
                           'test_that_latest/graphics_GLBench/summary.txt' +
                           ' for details.')

    # The good images, the silenced failures, and the zombie/recurring
    # failures.
    reference_imagenames = self.load_imagenames(self.reference_images_file)
    knownbad_imagenames = self.load_imagenames(self.knownbad_images_file)
    fixedbad_imagenames = self.load_imagenames(self.fixedbad_images_file)

    # Check if we saw GLBench end as expected (without crashing).
    test_ended_normal = False
    for line in results:
      if line.strip().startswith('@TEST_END'):
        test_ended_normal = True

    # Analyze individual test results in summary.
    # TODO(pwang): Raise TestFail if an error is detected during glbench.
    keyvals = {}
    failed_tests = {}
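    # Each result line has the form:
    #   @RESULT: <testname> = <score> <unit> [<imagefile>]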
    for line in results:
      if not line.strip().startswith('@RESULT: '):
        continue
      keyval, remainder = line[9:].split('[')
      key, val = keyval.split('=')
      testname = key.strip()
      score, unit = val.split()
      testrating = float(score)
      imagefile = remainder.split(']')[0]

      if not hasty:
        higher = self.unit_higher_is_better.get(unit)
        if higher is None:
          raise error.TestFail('Failed: Unknown test unit "%s" for %s' %
                               (unit, testname))
        # Prepend unit to test name to maintain backwards compatibility with
        # existing perf data.
        perf_value_name = '%s_%s' % (unit, testname)
        self.output_perf_value(
            description=perf_value_name,
            value=testrating,
            units=unit,
            higher_is_better=higher,
            graph=perf_value_name)

      # Classify result image.
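      # Perf score conventions used in the branches below: -3.0 GL error,
      # -2.0 fixedbad or unknown image, -1.0 knownbad image, 0.0 no score;
      # otherwise the measured rating is kept.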
      if testrating == -1.0:
        # Tests that generate GL Errors.
        glerror = imagefile.split('=')[1]
        f.write('# GLError ' + glerror + ' during test (perf set to -3.0)\n')
        keyvals[testname] = -3.0
        failed_tests[testname] = 'GLError'
      elif testrating == 0.0:
        # Tests for which glbench does not generate a meaningful perf score.
        f.write('# No score for test\n')
        keyvals[testname] = 0.0
      elif imagefile in fixedbad_imagenames:
        # We know the image looked bad at some point in time but we thought
        # it was fixed. Throw an exception as a reminder.
        keyvals[testname] = -2.0
        f.write('# fixedbad [' + imagefile + '] (setting perf as -2.0)\n')
        failed_tests[testname] = imagefile
      elif imagefile in knownbad_imagenames:
        # We have triaged the failure and have filed a tracking bug.
        # Don't throw an exception; just note that there is a problem.
        keyvals[testname] = -1.0
        f.write('# knownbad [' + imagefile + '] (setting perf as -1.0)\n')
        # This failure is whitelisted so don't add to failed_tests.
      elif imagefile in reference_imagenames:
        # Known good reference images (default).
        keyvals[testname] = testrating
      elif imagefile == 'none':
        # Tests that do not write images can't fail because of them.
        keyvals[testname] = testrating
      elif self.is_no_checksum_test(testname):
        # TODO(ihf): these really should not write any images
        keyvals[testname] = testrating
      else:
        # Completely unknown images. Raise a failure.
        keyvals[testname] = -2.0
        failed_tests[testname] = imagefile
        f.write('# unknown [' + imagefile + '] (setting perf as -2.0)\n')
    f.close()
    if not hasty:
      utils.report_temperature(self, 'temperature_3_after_test')
      self.write_perf_keyval(keyvals)

    # Raise exception if images don't match.
    if failed_tests:
      logging.info('Some images are not matching their reference in %s.',
                   self.reference_images_file)
      logging.info('Please verify that the output images are correct '
                   'and if so copy them to the reference directory.')
      raise error.TestFail('Failed: Some images are not matching their '
                           'references. Check /tmp/'
                           'test_that_latest/graphics_GLBench/summary.txt'
                           ' for details.')

    if not test_ended_normal:
      raise error.TestFail(
          'Failed: No end marker. Presumed crash/missing images.')
    if pc_error_reason:
      raise error.TestFail('Failed: %s' % pc_error_reason)