#!/usr/bin/python
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from collections import namedtuple
import json
import os
import re
import sys

AUTOTEST_NAME = 'graphics_PiglitBVT'
INPUT_DIR = './piglit_logs/'
OUTPUT_DIR = './test_scripts/'
OUTPUT_FILE_PATTERN = OUTPUT_DIR + '/%s/' + AUTOTEST_NAME + '_%d.sh'
OUTPUT_FILE_SLICES = 20
PIGLIT_PATH = '/usr/local/piglit/lib/piglit/'
PIGLIT64_PATH = '/usr/local/piglit/lib64/piglit/'
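# For illustration (hypothetical family name): slice 3 for family 'baytrail'
# lands at ./test_scripts/baytrail/graphics_PiglitBVT_3.sh (modulo a harmless
# doubled slash from OUTPUT_DIR's trailing '/').
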
# Do not generate scripts with "bash -e" as we want to handle errors ourselves.
FILE_HEADER = '#!/bin/bash\n\n'

# Script fragment function that kicks off individual piglit tests.
FILE_RUN_TEST = '\n\
function run_test()\n\
{\n\
  local name="$1"\n\
  local time="$2"\n\
  local command="$3"\n\
  echo "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"\n\
  echo "+ Running test [$name] of expected runtime $time sec: [$command]"\n\
  sync\n\
  $command\n\
  if [ $? == 0 ] ; then\n\
    let "need_pass--"\n\
    echo "+ pass :: $name"\n\
  else\n\
    let "failures++"\n\
    echo "+ fail :: $name"\n\
  fi\n\
}\n\
'
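# For illustration, generated scripts invoke this fragment roughly as
#   run_test "glx/glx-copy-sub-buffer" 0.0 "bin/glx-copy-sub-buffer -auto"
# (hypothetical test name and command; the real values come from the logs).
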
# Script fragment that summarizes the overall status.
FILE_SUMMARY = 'popd\n\
\n\
if [ $need_pass == 0 ] ; then\n\
  echo "+---------------------------------------------+"\n\
  echo "| Overall pass, as all %d tests have passed. |"\n\
  echo "+---------------------------------------------+"\n\
else\n\
  echo "+-----------------------------------------------------------+"\n\
  echo "| Overall failure, as $need_pass tests did not pass and $failures failed. |"\n\
  echo "+-----------------------------------------------------------+"\n\
fi\n\
exit $need_pass\n\
'

# Control file template for executing a slice.
CONTROL_FILE = "\
# Copyright 2014 The Chromium OS Authors. All rights reserved.\n\
# Use of this source code is governed by a BSD-style license that can be\n\
# found in the LICENSE file.\n\
\n\
NAME = '" + AUTOTEST_NAME + "'\n\
AUTHOR = 'chromeos-gfx'\n\
PURPOSE = 'Collection of automated tests for OpenGL implementations.'\n\
CRITERIA = 'All tests in a slice have to pass, otherwise it will fail.'\n\
TIME='SHORT'\n\
TEST_CATEGORY = 'Functional'\n\
TEST_CLASS = 'graphics'\n\
TEST_TYPE = 'client'\n\
JOB_RETRIES = 2\n\
\n\
BUG_TEMPLATE = {\n\
    'labels': ['Cr-OS-Kernel-Graphics'],\n\
}\n\
\n\
DOC = \"\"\"\n\
Piglit is a collection of automated tests for OpenGL implementations.\n\
\n\
The goal of Piglit is to help improve the quality of open source OpenGL drivers\n\
by providing developers with a simple means to perform regression tests.\n\
\n\
This control file runs slice %d out of %d slices of a passing subset of the\n\
original collection.\n\
\n\
http://piglit.freedesktop.org\n\
\"\"\"\n\
\n\
job.run_test('" + AUTOTEST_NAME + "', test_slice=%d)\
"

def output_control_file(sl, slices):
  """
  Write control file for slice sl to disk.
  """
  filename = 'control.%d' % sl
  with open(filename, 'w+') as f:
    print(CONTROL_FILE % (sl, slices, sl), file=f)

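# For illustration: output_control_file(3, 20) writes a file 'control.3' whose
# last line is job.run_test('graphics_PiglitBVT', test_slice=3).
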
def append_script_header(f, need_pass, piglit_path):
  """
  Write the beginning of the test script to f.
  """
  print(FILE_HEADER, file=f)
  # need_pass is the script variable that counts down to zero and gets returned.
  print('need_pass=%d' % need_pass, file=f)
  print('failures=0', file=f)
  print('PIGLIT_PATH=%s' % piglit_path, file=f)
  print('export PIGLIT_SOURCE_DIR=%s' % piglit_path, file=f)
  print('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PIGLIT_PATH/lib', file=f)
  print('export DISPLAY=:0', file=f)
  print('export XAUTHORITY=/home/chronos/.Xauthority', file=f)
  print('', file=f)
  print(FILE_RUN_TEST, file=f)
  print('', file=f)
  print('pushd $PIGLIT_PATH', file=f)

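# For illustration, append_script_header(f, 123, PIGLIT_PATH) starts f with
# roughly the following (hypothetical need_pass value):
#   #!/bin/bash
#   need_pass=123
#   failures=0
#   PIGLIT_PATH=/usr/local/piglit/lib/piglit/
#   ...
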
def append_script_summary(f, need_pass):
  """
  Append the summary to the test script f with a required pass count.
  """
  print(FILE_SUMMARY % need_pass, file=f)

def mkdir_p(path):
  """
  Create all directories in path (like 'mkdir -p'); an existing directory is
  not an error.
  """
  try:
    os.makedirs(path)
  except OSError:
    if os.path.isdir(path):
      pass
    else:
      raise

def get_filepaths(family_root, regex):
  """
  Find all files that were placed into family_root.
  Used to find regular log files (*results.json) and expectations*.json.
  """
  main_files = []
  for root, _, files in os.walk(family_root):
    for filename in files:
      if re.search(regex, filename):
        main_files.append(os.path.join(root, filename))
  return main_files

def load_files(main_files):
  """
  The log files are JSON dictionaries; load them from disk.
  """
  d = {}
  for main_file in main_files:
    # Use a context manager so each file handle is closed promptly.
    with open(main_file) as f:
      d[main_file] = json.load(f)
  return d

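# For illustration, each loaded log is expected to look roughly like
#   {'tests': {'glx/foo': {'result': 'pass', 'time': 1.5, 'command': '...'}}}
# (hypothetical entry, inferred from the fields read in get_test_statistics).
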
# Define a Test data structure containing the command line and runtime.
Test = namedtuple('Test', 'command time passing_count not_passing_count')
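# For illustration: Test('bin/foo -auto', 1.5, 4, 0) would describe a
# (hypothetical) test that takes at most 1.5 seconds, passed in 4 logs and
# never failed.
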
def get_test_statistics(log_dict):
  """
  Figures out, for each test, how often it passed/failed, which command it
  runs and how long it takes.
  """
  statistics = {}
  for main_file in log_dict:
    for test in log_dict[main_file]['tests']:
      # Initialize stats to zero for all known test names.
      statistics[test] = Test(None, 0.0, 0, 0)

  for main_file in log_dict:
    print('Updating statistics from %s.' % main_file, file=sys.stderr)
    tests = log_dict[main_file]['tests']
    for test in tests:
      command = statistics[test].command
      # Verify that each board uses the same command.
      if 'command' in tests[test]:
        if command:
          assert command == tests[test]['command']
        else:
          command = tests[test]['command']
      # Bump counts.
      if tests[test]['result'] == 'pass':
        statistics[test] = Test(command,
                                max(tests[test]['time'],
                                    statistics[test].time),
                                statistics[test].passing_count + 1,
                                statistics[test].not_passing_count)
      else:
        statistics[test] = Test(command,
                                statistics[test].time,
                                statistics[test].passing_count,
                                statistics[test].not_passing_count + 1)

  return statistics

def get_max_passing(statistics):
  """
  Gets the maximum count of passes a test has.
  """
  max_passing_count = 0
  for test in statistics:
    max_passing_count = max(statistics[test].passing_count, max_passing_count)
  return max_passing_count

def get_passing_tests(statistics, expectations):
  """
  Gets a list of all tests that never failed and have the maximum pass count,
  excluding tests that are listed in expectations.
  """
  tests = []
  max_passing_count = get_max_passing(statistics)
  for test in statistics:
    if (statistics[test].passing_count == max_passing_count and
        statistics[test].not_passing_count == 0):
      if test not in expectations:
        tests.append(test)
  return sorted(tests)

def get_intermittent_tests(statistics):
  """
  Gets tests that failed at least once and passed at least once.
  """
  tests = []
  max_passing_count = get_max_passing(statistics)
  for test in statistics:
    if (statistics[test].passing_count > 0 and
        statistics[test].passing_count < max_passing_count and
        statistics[test].not_passing_count > 0):
      tests.append(test)
  return sorted(tests)

def cleanup_command(cmd, piglit_path):
  """
  Make scripts less location dependent by stripping paths from commands.
  """
  cmd = cmd.replace(piglit_path, '')
  cmd = cmd.replace('framework/../', '')
  cmd = cmd.replace('tests/../', '')
  return cmd

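# For illustration: cleanup_command(PIGLIT_PATH + 'framework/../bin/foo -auto',
# PIGLIT_PATH) returns 'bin/foo -auto' (hypothetical command).
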
def process_gpu_family(family, family_root):
  """
  Takes a directory with log files from the same gpu family and processes
  the result logs into |slices| runnable scripts.
  """
  print('--> Processing "%s".' % family, file=sys.stderr)
  piglit_path = PIGLIT_PATH
  if family == 'other':
    piglit_path = PIGLIT64_PATH

  log_dict = load_files(get_filepaths(family_root, r'results\.json$'))
  # Load all expectations but ignore suggested.
  exp_dict = load_files(get_filepaths(family_root, r'expectations.*\.json$'))
  statistics = get_test_statistics(log_dict)
  expectations = compute_expectations(exp_dict, statistics, family, piglit_path)
  # Try to help the person updating piglit by collecting the variance
  # across different log files into one expectations file per family.
  output_suggested_expectations(expectations, family, family_root)

  # Now start computing the new test scripts.
  passing_tests = get_passing_tests(statistics, expectations)

  slices = OUTPUT_FILE_SLICES
  current_slice = 1
  slice_tests = []
  time_slice = 0
  num_processed = 0
  num_pass_total = len(passing_tests)
  time_total = 0
  for test in passing_tests:
    time_total += statistics[test].time

  # Generate one script containing all tests. This can be used as a simpler way
  # to run everything, but also to have an easier diff when updating piglit.
  filename = OUTPUT_FILE_PATTERN % (family, 0)
  # Ensure the output directory for this family exists.
  mkdir_p(os.path.dirname(os.path.realpath(filename)))
  if passing_tests:
    with open(filename, 'w+') as f:
      append_script_header(f, num_pass_total, piglit_path)
      for test in passing_tests:
        cmd = cleanup_command(statistics[test].command, piglit_path)
        # TODO(ihf): Pass proper time_test instead of 0.0 once we can use it.
        print('run_test "%s" %.1f "%s"' % (test, 0.0, cmd), file=f)
      append_script_summary(f, num_pass_total)

  # Slice passing tests into several pieces to get below BVT's 20 minute limit.
  # TODO(ihf): If we ever get into the situation that one test takes more than
  # time_total / slices we would get an empty slice afterward. Fortunately the
  # stderr spew should warn the operator of this.
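  # For illustration (hypothetical numbers): with time_total = 1200 seconds and
  # 20 slices, a slice is closed once its tests sum to at least 60 seconds.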
  for test in passing_tests:
    # We are still writing all the tests that belong in the current slice.
    if time_slice < time_total / slices:
      slice_tests.append(test)
      time_slice += statistics[test].time
      num_processed += 1

    # We finished the slice. Now output the file with all tests in this slice.
    if time_slice >= time_total / slices or num_processed == num_pass_total:
      filename = OUTPUT_FILE_PATTERN % (family, current_slice)
      with open(filename, 'w+') as f:
        need_pass = len(slice_tests)
        append_script_header(f, need_pass, piglit_path)
        for test in slice_tests:
          # Make the script less location dependent by stripping paths from
          # commands.
          cmd = cleanup_command(statistics[test].command, piglit_path)
          # TODO(ihf): Pass proper time_test instead of 0.0 once we can use it.
          print('run_test "%s" %.1f "%s"' % (test, 0.0, cmd), file=f)
        append_script_summary(f, need_pass)
        output_control_file(current_slice, slices)

      print('Slice %d: max runtime for %d passing tests is %.1f seconds.'
            % (current_slice, need_pass, time_slice), file=sys.stderr)
      current_slice += 1
      slice_tests = []
      time_slice = 0

  print('Total max runtime on "%s" for %d passing tests is %.1f seconds.' %
        (family, num_pass_total, time_total), file=sys.stderr)


def insert_expectation(expectations, test, expectation):
  """
  Insert test with expectation into the expectations dictionary.
  """
  if test not in expectations:
    # Just copy the whole expectation.
    expectations[test] = expectation
  else:
    # Always update the result, but copy other known fields only if they are
    # not already present.
    expectations[test]['result'] = expectation['result']
    if 'crbug' not in expectations[test] and 'crbug' in expectation:
      expectations[test]['crbug'] = expectation['crbug']
    if 'comment' not in expectations[test] and 'comment' in expectation:
      expectations[test]['comment'] = expectation['comment']
    if 'command' not in expectations[test] and 'command' in expectation:
      expectations[test]['command'] = expectation['command']
    if 'pass rate' not in expectations[test] and 'pass rate' in expectation:
      expectations[test]['pass rate'] = expectation['pass rate']


def compute_expectations(exp_dict, statistics, family, piglit_path):
  """
  Analyze intermittency and compute suggested test expectations.
  Test expectations are dictionaries with roughly the same structure as logs.
  """
  flaky_tests = get_intermittent_tests(statistics)
  print('Encountered %d tests that do not always pass in "%s" logs.' %
        (len(flaky_tests), family), file=sys.stderr)

  max_passing = get_max_passing(statistics)
  expectations = {}
  # Merge exp_dict which we loaded from disk into the new expectations.
  for filename in exp_dict:
    for test in exp_dict[filename]['tests']:
      expectation = exp_dict[filename]['tests'][test]
      # Historic results are not considered flaky, as a pass rate makes no
      # sense without current logs.
      expectation['result'] = 'skip'
      if 'pass rate' in expectation:
        expectation.pop('pass rate')
      # Overwrite historic commands with recently observed ones.
      if test in statistics:
        expectation['command'] = cleanup_command(statistics[test].command,
                                                 piglit_path)
        insert_expectation(expectations, test, expectation)
      else:
        print('Historic test [%s] not found in new logs. '
              'Dropping it from expectations.' % test, file=sys.stderr)

  # Handle the computed flakiness from the result logs that we just processed.
  for test in flaky_tests:
    pass_rate = statistics[test].passing_count / float(max_passing)
    command = statistics[test].command
    # Loading JSON converts everything to strings anyway, so save the rate as
    # a string with only 2 decimal places.
    expectation = {'result': 'flaky',
                   'pass rate': '%.2f' % pass_rate,
                   'command': command}
    insert_expectation(expectations, test, expectation)

  return expectations


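# For illustration, a flaky entry in the suggested expectations file looks
# roughly like this (hypothetical test name and values):
#   "glx/foo": {"command": "bin/foo -auto", "pass rate": "0.75",
#               "result": "flaky"}
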
def output_suggested_expectations(expectations, family, family_root):
  """
  Write the suggested expectations for family to a JSON file in family_root.
  """
  filename = os.path.join(family_root,
                          'suggested_exp_to_rename_%s.json' % family)
  with open(filename, 'w+') as f:
    json.dump({'tests': expectations}, f, indent=2, sort_keys=True,
              separators=(',', ': '))


def get_gpu_families(root):
  """
  We consider each directory under root a possible gpu family.
  """
  files = os.listdir(root)
  families = []
  for f in files:
    if os.path.isdir(os.path.join(root, f)):
      families.append(f)
  return families


def generate_scripts(root):
  """
  For each family under root create the corresponding set of passing test
  scripts.
  """
  families = get_gpu_families(root)
  for family in families:
    process_gpu_family(family, os.path.join(root, family))


# We check the log files in as highly compressed binaries.
print('Uncompressing log files...', file=sys.stderr)
os.system('bunzip2 ' + INPUT_DIR + '/*/*/*results.json.bz2')
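# The glob assumes two directory levels below INPUT_DIR, i.e. roughly
# piglit_logs/<family>/<board-or-run>/*results.json.bz2 (the exact second
# level depends on how the logs were collected).
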
# Generate the scripts.
generate_scripts(INPUT_DIR)

# The binaries should remain unchanged; otherwise use
#   git checkout -- piglit_output
# or similar to revert.
print('Recompressing log files...', file=sys.stderr)
os.system('bzip2 -9 ' + INPUT_DIR + '/*/*/*results.json')