# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os

from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import perf
from autotest_lib.client.cros import service_stopper
from autotest_lib.client.cros.graphics import graphics_utils


class graphics_GLBench(test.test):
  """Run glbench, a benchmark that times graphics intensive activities."""
  version = 1
  preserve_srcdir = True
  _services = None

  # Good images.
  reference_images_file = 'deps/glbench/glbench_reference_images.txt'
  # Images that are bad but for which the bug has not been fixed yet.
  knownbad_images_file = 'deps/glbench/glbench_knownbad_images.txt'
  # Images that are bad and for which a fix has been submitted.
  fixedbad_images_file = 'deps/glbench/glbench_fixedbad_images.txt'

  # These tests do not draw anything; they can only be used to check
  # performance.
  no_checksum_tests = set([
      'compositing_no_fill',
      'pixel_read',
      'texture_reuse_luminance_teximage2d',
      'texture_reuse_luminance_texsubimage2d',
      'texture_reuse_rgba_teximage2d',
      'texture_reuse_rgba_texsubimage2d',
      'context_glsimple',
      'swap_glsimple', ])

  blacklist = ''

  unit_higher_is_better = {
    'mpixels_sec': True,
    'mtexel_sec': True,
    'mtri_sec': True,
    'mvtx_sec': True,
    'us': False,
    '1280x768_fps': True }
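  # The unit string is parsed from each '@RESULT:' line below; times in
  # 'us' are lower-is-better, while throughput units like 'mpixels_sec'
  # are higher-is-better.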

  GSC = None

  def setup(self):
    self.job.setup_dep(['glbench'])

  def initialize(self):
    self.GSC = graphics_utils.GraphicsStateChecker()
    # If the UI is running, we must stop it and restore it later (see
    # cleanup).
    self._services = service_stopper.ServiceStopper(['ui'])
    self._services.stop_services()

  def cleanup(self):
    if self._services:
      self._services.restore_services()
    if self.GSC:
      keyvals = self.GSC.get_memory_keyvals()
      for key, val in keyvals.iteritems():
        self.output_perf_value(description=key, value=val,
                               units='bytes', higher_is_better=False)
      self.GSC.finalize()
      self.write_perf_keyval(keyvals)

  def report_temperature(self, keyname):
    """Report current max observed temperature with given keyname.

    @param keyname: key to be used when reporting perf value.
    """
    temperature = utils.get_temperature_input_max()
    logging.info('%s = %f degrees Celsius', keyname, temperature)
    self.output_perf_value(description=keyname, value=temperature,
                           units='Celsius', higher_is_better=False)

  def report_temperature_critical(self, keyname):
    """Report temperature at which we will see throttling with given keyname.

    @param keyname: key to be used when reporting perf value.
    """
    temperature = utils.get_temperature_critical()
    logging.info('%s = %f degrees Celsius', keyname, temperature)
    self.output_perf_value(description=keyname, value=temperature,
                           units='Celsius', higher_is_better=False)

  def is_no_checksum_test(self, testname):
    """Check if given test requires no screenshot checksum.

    @param testname: name of test to check.
    """
    for prefix in self.no_checksum_tests:
      if testname.startswith(prefix):
        return True
    return False

  def load_imagenames(self, filename):
    """Loads a text file with MD5 image file names.

    @param filename: name of file to load.
    """
    imagenames = os.path.join(self.autodir, filename)
    with open(imagenames, 'r') as f:
      imagenames = f.read()
      return imagenames

  def run_once(self, options='', hasty=False):
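    """Runs glbench and processes its output.

    @param options: additional command line options passed to glbench.
    @param hasty: if True, run only a quick subset of tests without
                  thermal monitoring.
    """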
    dep = 'glbench'
    dep_dir = os.path.join(self.autodir, 'deps', dep)
    self.job.install_pkg(dep, 'dep', dep_dir)

    options += self.blacklist

    # Run the test. Saving the images is optional and helps with debugging
    # and reference image management. If unknown images are encountered,
    # one can take them from the outdir and copy them (after verification)
    # into the reference image directory.
    exefile = os.path.join(self.autodir, 'deps/glbench/glbench')
    outdir = self.outputdir
    options += ' -save -outdir=' + outdir
    # With the -hasty option we run only a subset of tests without waiting
    # for thermals to normalize. The test should complete in 15-20 seconds.
    if hasty:
      options += ' -hasty'

    cmd = '%s %s' % (exefile, options)
    if not utils.is_freon():
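      # Pre-freon boards need a running X server: start one on display :1
      # (vt1), give it a second to come up, switch to vt1 and point
      # glbench at it via DISPLAY.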
      cmd = 'X :1 vt1 & sleep 1; chvt 1 && DISPLAY=:1 ' + cmd
    summary = None
    try:
      if hasty:
        # On BVT the test will not monitor thermals, so we will not verify
        # its correct status using PerfControl.
        summary = utils.run(cmd,
                            stderr_is_expected=False,
                            stdout_tee=utils.TEE_TO_LOGS,
                            stderr_tee=utils.TEE_TO_LOGS).stdout
      else:
        self.report_temperature_critical('temperature_critical')
        self.report_temperature('temperature_1_start')
        # Wrap the test run inside of a PerfControl instance to make machine
        # behavior more consistent.
        with perf.PerfControl() as pc:
          if not pc.verify_is_valid():
            raise error.TestError(pc.get_error_reason())
          self.report_temperature('temperature_2_before_test')

          # Run the test. If it gets the CPU too hot, pc should notice.
          summary = utils.run(cmd,
                              stderr_is_expected=False,
                              stdout_tee=utils.TEE_TO_LOGS,
                              stderr_tee=utils.TEE_TO_LOGS).stdout
          if not pc.verify_is_valid():
            raise error.TestError(pc.get_error_reason())
    finally:
      if not utils.is_freon():
        # Just sending SIGTERM to X is not enough; we must wait for it to
        # really die before we start a new X server (i.e. start ui).
        utils.ensure_processes_are_dead_by_name('^X$')

    # Write a copy of stdout to help debug failures.
    results_path = os.path.join(self.outputdir, 'summary.txt')
    f = open(results_path, 'w+')
    f.write('# ---------------------------------------------------\n')
    f.write('# [' + cmd + ']\n')
    f.write(summary)
    f.write('\n# -------------------------------------------------\n')
    f.write('# [graphics_GLBench.py postprocessing]\n')

    # Analyze the output. Sample:
    ## board_id: NVIDIA Corporation - Quadro FX 380/PCI/SSE2
    ## Running: ../glbench -save -outdir=img
    #swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]
    results = summary.splitlines()
    if not results:
      f.close()
      raise error.TestFail('No output from test. Check /tmp/' +
                           'test_that_latest/graphics_GLBench/summary.txt' +
                           ' for details.')

    # The good images, the silenced failures (known bad) and the
    # zombie/recurring failures (fixed bad).
    reference_imagenames = self.load_imagenames(self.reference_images_file)
    knownbad_imagenames = self.load_imagenames(self.knownbad_images_file)
    fixedbad_imagenames = self.load_imagenames(self.fixedbad_images_file)
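    # Note: load_imagenames returns raw file contents, so the
    # 'imagefile in ...' tests below are substring checks against the
    # whole file.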

    # Check if we saw GLBench end as expected (without crashing).
    test_ended_normal = False
    for line in results:
      if line.strip().startswith('@TEST_END'):
        test_ended_normal = True

    # Analyze individual test results in summary.
    keyvals = {}
    failed_tests = {}
    for line in results:
      if not line.strip().startswith('@RESULT: '):
        continue
      keyval, remainder = line[9:].split('[')
      key, val = keyval.split('=')
      testname = key.strip()
      score, unit = val.split()
      testrating = float(score)
      imagefile = remainder.split(']')[0]
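      # Worked example (hypothetical values, cf. the sample above): for
      #   '@RESULT: swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]'
      # this yields testname='swap_swap', testrating=221.36, unit='us'
      # and imagefile='swap_swap.pixmd5-20dbc...f9c700d2f.png'.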

      higher = self.unit_higher_is_better.get(unit)
      if higher is None:
        raise error.TestFail('Unknown test unit "%s" for %s' %
                             (unit, testname))

      if not hasty:
        # Prepend the unit to the test name to maintain backwards
        # compatibility with existing perf data.
        perf_value_name = '%s_%s' % (unit, testname)
        self.output_perf_value(description=perf_value_name, value=testrating,
                               units=unit, higher_is_better=higher,
                               graph=perf_value_name)
        # Add extra value to the graph distinguishing different boards.
        variant = utils.get_board_with_frequency_and_memory()
        desc = '%s-%s' % (perf_value_name, variant)
        self.output_perf_value(description=desc, value=testrating,
                               units=unit, higher_is_better=higher,
                               graph=perf_value_name)

      # Classify result image.
      if testrating == -1.0:
        # Tests that generate GL Errors.
        glerror = imagefile.split('=')[1]
        f.write('# GLError ' + glerror + ' during test (perf set to -3.0)\n')
        keyvals[testname] = -3.0
        failed_tests[testname] = 'GLError'
      elif testrating == 0.0:
        # Tests for which glbench does not generate a meaningful perf score.
        f.write('# No score for test\n')
        keyvals[testname] = 0.0
      elif imagefile in fixedbad_imagenames:
        # We know the image looked bad at some point in time but we thought
        # it was fixed. Throw an exception as a reminder.
        keyvals[testname] = -2.0
        f.write('# fixedbad [' + imagefile + '] (setting perf as -2.0)\n')
        failed_tests[testname] = imagefile
      elif imagefile in knownbad_imagenames:
        # We have triaged the failure and have filed a tracking bug, so
        # don't throw an exception; the tracking bug is the reminder.
        keyvals[testname] = -1.0
        f.write('# knownbad [' + imagefile + '] (setting perf as -1.0)\n')
        # This failure is whitelisted so don't add to failed_tests.
      elif imagefile in reference_imagenames:
        # Known good reference images (default).
        keyvals[testname] = testrating
      elif imagefile == 'none':
        # Tests that do not write images can't fail because of them.
        keyvals[testname] = testrating
      elif self.is_no_checksum_test(testname):
        # TODO(ihf): these really should not write any images
        keyvals[testname] = testrating
      else:
        # Completely unknown images. Raise a failure.
        keyvals[testname] = -2.0
        failed_tests[testname] = imagefile
        f.write('# unknown [' + imagefile + '] (setting perf as -2.0)\n')
    f.close()
    if not hasty:
      self.report_temperature('temperature_3_after_test')
      self.write_perf_keyval(keyvals)

    # Raise exception if images don't match.
    if failed_tests:
      logging.info('Some images do not match their references in %s.',
                   self.reference_images_file)
      logging.info('Please verify that the output images are correct '
                   'and, if so, copy them to the reference directory.')
      raise error.TestFail('Some images do not match their '
                           'references. Check /tmp/'
                           'test_that_latest/graphics_GLBench/summary.txt'
                           ' for details.')

    if not test_ended_normal:
      raise error.TestFail('No end marker. Presumed crash/missing images.')
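
# A typical local invocation from inside a ChromeOS chroot (runner and
# device address assumed; adjust to your setup):
#   test_that <DUT_IP> graphics_GLBench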