# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os

from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import perf
from autotest_lib.client.cros import service_stopper
from autotest_lib.client.cros.graphics import graphics_utils


class graphics_GLBench(test.test):
    """Run glbench, a benchmark that times graphics intensive activities."""
    version = 1
    preserve_srcdir = True
    # ServiceStopper instance; set in initialize(), restored in cleanup().
    _services = None

    # Good images.
    reference_images_file = 'deps/glbench/glbench_reference_images.txt'
    # Images that are bad but for which the bug has not been fixed yet.
    knownbad_images_file = 'deps/glbench/glbench_knownbad_images.txt'
    # Images that are bad and for which a fix has been submitted.
    fixedbad_images_file = 'deps/glbench/glbench_fixedbad_images.txt'

    # These tests do not draw anything, they can only be used to check
    # performance.
    no_checksum_tests = set([
        'compositing_no_fill',
        'pixel_read',
        'texture_reuse_luminance_teximage2d',
        'texture_reuse_luminance_texsubimage2d',
        'texture_reuse_rgba_teximage2d',
        'texture_reuse_rgba_texsubimage2d',
        'context_glsimple',
        'swap_glsimple',
    ])

    blacklist = ''

    # Maps a glbench result unit to whether larger values mean better
    # performance (e.g. 'us' latencies are better when smaller).
    unit_higher_is_better = {
        'mpixels_sec': True,
        'mtexel_sec': True,
        'mtri_sec': True,
        'mvtx_sec': True,
        'us': False,
        '1280x768_fps': True
    }

    # GraphicsStateChecker instance; set in initialize().
    GSC = None

    def setup(self):
        """Install the glbench dependency."""
        self.job.setup_dep(['glbench'])

    def initialize(self):
        """Snapshot graphics state and stop the UI for exclusive GPU access."""
        self.GSC = graphics_utils.GraphicsStateChecker()
        # If UI is running, we must stop it and restore later.
        self._services = service_stopper.ServiceStopper(['ui'])
        self._services.stop_services()

    def cleanup(self):
        """Restore stopped services and report graphics memory keyvals."""
        if self._services:
            self._services.restore_services()
        if self.GSC:
            keyvals = self.GSC.get_memory_keyvals()
            # items() instead of the Python-2-only iteritems() so this code
            # keeps working on Python 3 as well.
            for key, val in keyvals.items():
                self.output_perf_value(
                    description=key,
                    value=val,
                    units='bytes',
                    higher_is_better=False)
            self.GSC.finalize()
            self.write_perf_keyval(keyvals)

    def report_temperature(self, keyname):
        """Report current max observed temperature with given keyname.

        @param keyname: key to be used when reporting perf value.
        """
        temperature = utils.get_temperature_input_max()
        logging.info('%s = %f degree Celsius', keyname, temperature)
        self.output_perf_value(
            description=keyname,
            value=temperature,
            units='Celsius',
            higher_is_better=False)

    def report_temperature_critical(self, keyname):
        """Report temperature at which we will see throttling with given keyname.

        @param keyname: key to be used when reporting perf value.
        """
        temperature = utils.get_temperature_critical()
        logging.info('%s = %f degree Celsius', keyname, temperature)
        self.output_perf_value(
            description=keyname,
            value=temperature,
            units='Celsius',
            higher_is_better=False)

    def is_no_checksum_test(self, testname):
        """Check if given test requires no screenshot checksum.

        @param testname: name of test to check.
        @return: True if the test name matches a no-checksum prefix.
        """
        for prefix in self.no_checksum_tests:
            if testname.startswith(prefix):
                return True
        return False

    def load_imagenames(self, filename):
        """Loads text file with MD5 file names.

        @param filename: name of file to load, relative to self.autodir.
        @return: the file contents as one string (used for substring lookup).
        """
        path = os.path.join(self.autodir, filename)
        with open(path, 'r') as f:
            return f.read()

    def run_once(self, options='', hasty=False):
        """Run glbench and post-process its results.

        @param options: extra command line options passed to the glbench
                binary.
        @param hasty: if True, run only a quick subset of tests without
                thermal monitoring (used on BVT).
        @raises error.TestFail: on crash, unknown units, thermal problems or
                mismatching images.
        """
        dep = 'glbench'
        dep_dir = os.path.join(self.autodir, 'deps', dep)
        self.job.install_pkg(dep, 'dep', dep_dir)

        options += self.blacklist

        # Run the test, saving is optional and helps with debugging
        # and reference image management. If unknown images are
        # encountered one can take them from the outdir and copy
        # them (after verification) into the reference image dir.
        exefile = os.path.join(self.autodir, 'deps/glbench/glbench')
        outdir = self.outputdir
        options += ' -save -outdir=' + outdir
        # Using the -hasty option we run only a subset of tests without waiting
        # for thermals to normalize. Test should complete in 15-20 seconds.
        if hasty:
            options += ' -hasty'

        cmd = '%s %s' % (exefile, options)
        summary = None
        try:
            if hasty:
                # On BVT the test will not monitor thermals so we will not
                # verify its correct status using PerfControl
                summary = utils.run(cmd,
                                    stderr_is_expected=False,
                                    stdout_tee=utils.TEE_TO_LOGS,
                                    stderr_tee=utils.TEE_TO_LOGS).stdout
            else:
                self.report_temperature_critical('temperature_critical')
                self.report_temperature('temperature_1_start')
                # Wrap the test run inside of a PerfControl instance to make
                # machine behavior more consistent.
                with perf.PerfControl() as pc:
                    if not pc.verify_is_valid():
                        raise error.TestFail('Failed: %s' %
                                             pc.get_error_reason())
                    self.report_temperature('temperature_2_before_test')

                    # Run the test. If it gets the CPU too hot pc should
                    # notice.
                    summary = utils.run(cmd,
                                        stderr_is_expected=False,
                                        stdout_tee=utils.TEE_TO_LOGS,
                                        stderr_tee=utils.TEE_TO_LOGS).stdout
                    if not pc.verify_is_valid():
                        raise error.TestFail('Failed: %s' %
                                             pc.get_error_reason())
        except error.CmdError:
            raise error.TestFail('Failed: CmdError running %s' % cmd)
        except error.CmdTimeoutError:
            raise error.TestFail('Failed: CmdTimeout running %s' % cmd)

        # Write a copy of stdout to help debug failures. A context manager
        # guarantees the file is closed even when a TestFail is raised while
        # post-processing (the original manual close() leaked on error paths).
        results_path = os.path.join(self.outputdir, 'summary.txt')
        with open(results_path, 'w+') as f:
            f.write('# ---------------------------------------------------\n')
            f.write('# [' + cmd + ']\n')
            f.write(summary)
            f.write('\n# -------------------------------------------------\n')
            f.write('# [graphics_GLBench.py postprocessing]\n')

            # Analyze the output. Sample:
            ## board_id: NVIDIA Corporation - Quadro FX 380/PCI/SSE2
            ## Running: ../glbench -save -outdir=img
            #swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]
            results = summary.splitlines()
            if not results:
                raise error.TestFail('Failed: No output from test. Check /tmp/' +
                                     'test_that_latest/graphics_GLBench/summary.txt' +
                                     ' for details.')

            # The good images, the silenced and the zombie/recurring failures.
            reference_imagenames = self.load_imagenames(
                self.reference_images_file)
            knownbad_imagenames = self.load_imagenames(
                self.knownbad_images_file)
            fixedbad_imagenames = self.load_imagenames(
                self.fixedbad_images_file)

            # Check if we saw GLBench end as expected (without crashing).
            test_ended_normal = False
            for line in results:
                if line.strip().startswith('@TEST_END'):
                    test_ended_normal = True

            # Analyze individual test results in summary.
            keyvals = {}
            failed_tests = {}
            for line in results:
                if not line.strip().startswith('@RESULT: '):
                    continue
                # Split only on the first '[' so an image name containing an
                # extra '[' cannot raise a ValueError here.
                keyval, remainder = line[9:].split('[', 1)
                key, val = keyval.split('=')
                testname = key.strip()
                score, unit = val.split()
                testrating = float(score)
                imagefile = remainder.split(']')[0]

                higher = self.unit_higher_is_better.get(unit)
                if higher is None:
                    raise error.TestFail('Failed: Unknown test unit "%s" for %s' %
                                         (unit, testname))

                if not hasty:
                    # Prepend unit to test name to maintain backwards
                    # compatibility with existing per data.
                    perf_value_name = '%s_%s' % (unit, testname)
                    self.output_perf_value(
                        description=perf_value_name,
                        value=testrating,
                        units=unit,
                        higher_is_better=higher,
                        graph=perf_value_name)
                    # Add extra value to the graph distinguishing different
                    # boards.
                    variant = utils.get_board_with_frequency_and_memory()
                    desc = '%s-%s' % (perf_value_name, variant)
                    self.output_perf_value(
                        description=desc,
                        value=testrating,
                        units=unit,
                        higher_is_better=higher,
                        graph=perf_value_name)

                # Classify result image.
                if testrating == -1.0:
                    # Tests that generate GL Errors.
                    glerror = imagefile.split('=')[1]
                    f.write('# GLError ' + glerror +
                            ' during test (perf set to -3.0)\n')
                    keyvals[testname] = -3.0
                    failed_tests[testname] = 'GLError'
                elif testrating == 0.0:
                    # Tests for which glbench does not generate a meaningful
                    # perf score.
                    f.write('# No score for test\n')
                    keyvals[testname] = 0.0
                elif imagefile in fixedbad_imagenames:
                    # We know the image looked bad at some point in time but we
                    # thought it was fixed. Throw an exception as a reminder.
                    keyvals[testname] = -2.0
                    f.write('# fixedbad [' + imagefile +
                            '] (setting perf as -2.0)\n')
                    failed_tests[testname] = imagefile
                elif imagefile in knownbad_imagenames:
                    # We have triaged the failure and have filed a tracking
                    # bug. Don't throw an exception and remind there is a
                    # problem.
                    keyvals[testname] = -1.0
                    f.write('# knownbad [' + imagefile +
                            '] (setting perf as -1.0)\n')
                    # This failure is whitelisted so don't add to failed_tests.
                elif imagefile in reference_imagenames:
                    # Known good reference images (default).
                    keyvals[testname] = testrating
                elif imagefile == 'none':
                    # Tests that do not write images can't fail because of
                    # them.
                    keyvals[testname] = testrating
                elif self.is_no_checksum_test(testname):
                    # TODO(ihf): these really should not write any images
                    keyvals[testname] = testrating
                else:
                    # Completely unknown images. Raise a failure.
                    keyvals[testname] = -2.0
                    failed_tests[testname] = imagefile
                    f.write('# unknown [' + imagefile +
                            '] (setting perf as -2.0)\n')

        if not hasty:
            self.report_temperature('temperature_3_after_test')
            self.write_perf_keyval(keyvals)

        # Raise exception if images don't match.
        if failed_tests:
            logging.info('Some images are not matching their reference in %s.',
                         self.reference_images_file)
            logging.info('Please verify that the output images are correct '
                         'and if so copy them to the reference directory.')
            raise error.TestFail('Failed: Some images are not matching their '
                                 'references. Check /tmp/'
                                 'test_that_latest/graphics_GLBench/summary.txt'
                                 ' for details.')

        if not test_ended_normal:
            raise error.TestFail(
                'Failed: No end marker. Presumed crash/missing images.')