#!/usr/bin/env python2
#
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# pylint: disable=cros-logging-import

# This script runs the specified benchmark with different toolchain settings.
# It covers building the benchmark locally and running the benchmark on the
# DUT.
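#
# A minimal example invocation (illustrative only: the benchmark name,
# compiler path, and device serial are placeholders, valid benchmark names
# come from config.bench_list, and this file is assumed to be saved as
# run.py):
#
#   ./run.py -b Hwui -c /path/to/clang/bin -f='-O3' -i 3 -s <device_serial>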

"""Main script to run the benchmark suite from building to testing."""
from __future__ import print_function

import argparse
import config
import ConfigParser
import logging
import os
import subprocess
import sys

logging.basicConfig(level=logging.INFO)


def _parse_arguments(argv):
  parser = argparse.ArgumentParser(description='Build and run specific '
                                   'benchmark')
  parser.add_argument(
      '-b',
      '--bench',
      action='append',
      default=[],
      help='Select which benchmark to run')

  # Only one of compiler directory and llvm prebuilts version can be
  # specified, so put -c and -l into a mutually exclusive group.
  group = parser.add_mutually_exclusive_group()

  # The toolchain setting arguments have an action of 'append', so that users
  # can compare performance across several toolchain settings together.
  group.add_argument(
      '-c',
      '--compiler_dir',
      metavar='DIR',
      action='append',
      default=[],
      help='Specify the path to the compiler\'s bin directory. '
      'You may give several paths, each with a -c, to '
      'compare performance differences between '
      'compilers.')

  parser.add_argument(
      '-o',
      '--build_os',
      action='append',
      default=[],
      help='Specify the host OS to build the benchmark on.')

  group.add_argument(
      '-l',
      '--llvm_prebuilts_version',
      action='append',
      default=[],
      help='Specify the version of prebuilt LLVM. If the '
      'specified prebuilt version of LLVM already '
      'exists, there is no need to pass the path to the '
      'compiler directory.')

  parser.add_argument(
      '-f',
      '--cflags',
      action='append',
      default=[],
      help='Specify the cflags options for the toolchain. '
      'Be sure to quote all the cflags with quotation '
      'marks ("") or use an equals sign (=).')
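  # For example (illustrative flags, not a recommendation): -f='-O3' or
  # --cflags="-O3 -fno-vectorize"; each -f adds one complete set of cflags,
  # matched positionally with the corresponding -c/-l setting.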
  parser.add_argument(
      '--ldflags',
      action='append',
      default=[],
      help='Specify linker flags for the toolchain.')

  parser.add_argument(
      '-i',
      '--iterations',
      type=int,
      default=1,
      help='Specify how many iterations the test '
      'takes.')

  # Arguments -s and -r are for connecting to the DUT.
  parser.add_argument(
      '-s',
      '--serials',
      help='Comma-separated list of device serials under '
      'test.')

  parser.add_argument(
      '-r',
      '--remote',
      default='localhost',
      help='hostname[:port] if the ADB device is connected '
      'to a remote machine. Ensure this workstation '
      'is configured for passwordless ssh access as '
      'users "root" or "adb".')

  # Arguments --frequency and -m are for device settings.
  parser.add_argument(
      '--frequency',
      type=int,
      default=960000,
      help='Specify the CPU frequency of the device. The '
      'unit is kHz. The available values are defined in '
      'the cpufreq/scaling_available_frequency file in '
      'each core\'s directory on the device. '
      'The default value is 960000, which gives a good '
      'balance between noise and performance. A lower '
      'frequency will slow down the performance but '
      'reduce noise.')
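  # For reference (illustrative; the exact sysfs layout can vary by device),
  # the supported frequencies can usually be inspected with something like:
  #   adb shell cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies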

  parser.add_argument(
      '-m',
      '--mode',
      default='little',
      help='Specify whether to use \'little\' or \'big\' '
      'mode. The default is little mode. '
      'Little mode runs on a single core of '
      'Cortex-A53, while big mode runs on a single core '
      'of Cortex-A57.')

  # Configuration file for the benchmark test.
  parser.add_argument(
      '-t',
      '--test',
      help='Specify the test settings with a configuration '
      'file.')

  # Whether to keep old JSON results or not.
  parser.add_argument(
      '-k',
      '--keep',
      default='False',
      help='Specify whether to keep the old JSON results '
      'from the last run. This can be useful if you '
      'want to compare performance differences in two or '
      'more different runs. Default is False (off).')

  return parser.parse_args(argv)


# Clear old log files in the bench suite directory.
def clear_logs():
  logging.info('Removing old logfiles...')
  for f in ['build_log', 'device_log', 'test_log']:
    logfile = os.path.join(config.bench_suite_dir, f)
    try:
      os.remove(logfile)
    except OSError:
      logging.info('No logfile %s needs to be removed. Ignored.', f)
  logging.info('Old logfiles have been removed.')


# Clear old JSON result files in the bench suite directory.
def clear_results():
  logging.info('Clearing old json results...')
  for bench in config.bench_list:
    result = os.path.join(config.bench_suite_dir, bench + '.json')
    try:
      os.remove(result)
    except OSError:
      logging.info('No %s json file needs to be removed. Ignored.', bench)
  logging.info('Old json results have been removed.')


# Use subprocess.check_call to run another script, and write its stdout to a
# log file.
def check_call_with_log(cmd, log_file):
  log_file = os.path.join(config.bench_suite_dir, log_file)
  with open(log_file, 'a') as logfile:
    log_header = 'Log for command: %s\n' % (cmd)
    logfile.write(log_header)
    try:
      subprocess.check_call(cmd, stdout=logfile)
    except subprocess.CalledProcessError:
      logging.error('Error running %s, please check %s for more info.', cmd,
                    log_file)
      raise
  logging.info('Logs for %s are written to %s.', cmd, log_file)


def set_device(serials, remote, frequency):
  setting_cmd = [
      os.path.join(
          os.path.join(config.android_home, config.autotest_dir),
          'site_utils/set_device.py')
  ]
  setting_cmd.append('-r=' + remote)
  setting_cmd.append('-q=' + str(frequency))
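  # For example (illustrative values), with remote='localhost', a frequency of
  # 960000 and serial 'ABCD1234', the resulting command resembles:
  #   <android_home>/<autotest_dir>/site_utils/set_device.py \
  #       -r=localhost -q=960000 -s=ABCD1234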

  # Deal with serials.
  # If no serials are specified, try to run the test on the only device.
  # If specified, split the serials into a list and run the test on each
  # device.
  if serials:
    for serial in serials.split(','):
      setting_cmd.append('-s=' + serial)
      check_call_with_log(setting_cmd, 'device_log')
      setting_cmd.pop()
  else:
    check_call_with_log(setting_cmd, 'device_log')

  logging.info('CPU mode and frequency set successfully!')


def log_ambiguous_args():
  logging.error('The count of arguments does not match!')
  raise ValueError('The count of arguments does not match.')


# Check whether the counts of the build arguments are ambiguous.  The number
# of -c/-l, -f, --ldflags, and -o arguments should be either all 0 or all the
# same.
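# For example (illustrative): two -c paths with two matching -f flags, such
# as
#   -c /clang-a/bin -c /clang-b/bin -f='-O2' -f='-O3'
# give a count of 2, while mismatched counts (e.g. two -c but three -f)
# raise a ValueError via log_ambiguous_args().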
def check_count(compiler, llvm_version, build_os, cflags, ldflags):
  # Count will be set to 0 if neither compiler nor llvm_version is specified.
  # Otherwise, one of these two lists must be empty (they are mutually
  # exclusive), so count is the length of the other one.
  count = max(len(compiler), len(llvm_version))

  # Check that the number of cflags is either 0 or the same as the count so
  # far.
  if len(cflags) != 0:
    if count != 0 and len(cflags) != count:
      log_ambiguous_args()
    count = len(cflags)

  if len(ldflags) != 0:
    if count != 0 and len(ldflags) != count:
      log_ambiguous_args()
    count = len(ldflags)

  if len(build_os) != 0:
    if count != 0 and len(build_os) != count:
      log_ambiguous_args()
    count = len(build_os)

  # If no settings are passed, run the default setting once.
  return max(1, count)


# Build the benchmark binary with the given toolchain settings.
def build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
                ldflags):
  # Build the benchmark locally.
  build_cmd = ['./build_bench.py', '-b=' + bench]
  if compiler:
    build_cmd.append('-c=' + compiler[setting_no])
  if llvm_version:
    build_cmd.append('-l=' + llvm_version[setting_no])
  if build_os:
    build_cmd.append('-o=' + build_os[setting_no])
  if cflags:
    build_cmd.append('-f=' + cflags[setting_no])
  if ldflags:
    build_cmd.append('--ldflags=' + ldflags[setting_no])

  logging.info('Building benchmark for toolchain setting No.%d...', setting_no)
  logging.info('Command: %s', build_cmd)

  try:
    subprocess.check_call(build_cmd)
  except Exception:
    logging.error('Error while building benchmark!')
    raise


def run_and_collect_result(test_cmd, setting_no, i, bench, serial='default'):

  # Run the autotest script for the benchmark on the DUT.
  check_call_with_log(test_cmd, 'test_log')

  logging.info('Benchmark with setting No.%d, iter.%d finished testing on '
               'device %s.', setting_no, i, serial)

  # Rename the bench_result file generated by autotest.
  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
  if not os.path.exists(bench_result):
    logging.error('No result found at %s, '
                  'please check test_log for details.', bench_result)
    raise OSError('Result file %s not found.' % bench_result)

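  # The renamed result follows the pattern
  # bench_result_{bench}_{serial}_{setting_no}_{iteration}, e.g. (with a
  # hypothetical benchmark and serial) bench_result_Hwui_ABCD1234_0_2.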
  new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial, setting_no, i)
  new_bench_result_path = os.path.join(config.bench_suite_dir, new_bench_result)
  try:
    os.rename(bench_result, new_bench_result_path)
  except OSError:
    logging.error('Error while renaming raw result %s to %s', bench_result,
                  new_bench_result_path)
    raise

  logging.info('Benchmark result saved at %s.', new_bench_result_path)


def test_bench(bench, setting_no, iterations, serials, remote, mode):
  logging.info('Start running benchmark on device...')

  # Run benchmark and tests on DUT
  for i in range(iterations):
    logging.info('Iteration No.%d:', i)
    test_cmd = [
        os.path.join(
            os.path.join(config.android_home, config.autotest_dir),
            'site_utils/test_bench.py')
    ]
    test_cmd.append('-b=' + bench)
    test_cmd.append('-r=' + remote)
    test_cmd.append('-m=' + mode)
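    # For example (illustrative values), for bench='Hwui', remote='localhost'
    # and mode='little', the resulting command resembles:
    #   <android_home>/<autotest_dir>/site_utils/test_bench.py \
    #       -b=Hwui -r=localhost -m=little [-s=<serial>]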

    # Deal with serials.
    # If no serials are specified, try to run the test on the only device.
    # If specified, split the serials into a list and run the test on each
    # device.
    if serials:
      for serial in serials.split(','):
        test_cmd.append('-s=' + serial)

        run_and_collect_result(test_cmd, setting_no, i, bench, serial)
        test_cmd.pop()
    else:
      run_and_collect_result(test_cmd, setting_no, i, bench)


def gen_json(bench, setting_no, iterations, serials):
  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')

  logging.info('Generating JSON file for Crosperf...')

  if not serials:
    serials = 'default'

  for serial in serials.split(','):

    # The platform (device lunch combo) is used as the experiment label
    # instead.
    #experiment = '_'.join([serial, str(setting_no)])
    experiment = config.product_combo

    # Input format: bench_result_{bench}_{serial}_{setting_no}_
    input_file = '_'.join([bench_result, bench, serial, str(setting_no), ''])
    gen_json_cmd = [
        './gen_json.py', '--input=' + input_file,
        '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
        '--bench=' + bench, '--platform=' + experiment,
        '--iterations=' + str(iterations)
    ]

    logging.info('Command: %s', gen_json_cmd)
    if subprocess.call(gen_json_cmd):
      logging.error('Error while generating JSON file, please check the raw '
                    'data of the results at %s.', input_file)


def gen_crosperf(infile, outfile):
  # Set the environment variable for crosperf.
  os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)

  logging.info('Generating Crosperf Report...')
  crosperf_cmd = [
      os.path.join(config.toolchain_utils, 'generate_report.py'),
      '-i=' + infile, '-o=' + outfile, '-f'
  ]

  # Run crosperf generate_report.py.
  logging.info('Command: %s', crosperf_cmd)
  subprocess.call(crosperf_cmd)

  logging.info('Report generated successfully!')
  logging.info('Report location: %s.html in the bench suite directory.',
               outfile)


def main(argv):
  # Set the environment variable for the local location of the benchmark
  # suite. This is used for collecting test results into the benchmark suite
  # directory.
  os.environ['BENCH_SUITE_DIR'] = config.bench_suite_dir

  # Set the Android type, used for the parts that differ between AOSP and
  # internal builds.
  os.environ['ANDROID_TYPE'] = config.android_type

  # Set ANDROID_HOME for both building and testing.
  os.environ['ANDROID_HOME'] = config.android_home

  # Set the environment variable for the architecture; this will be used in
  # autotest.
  os.environ['PRODUCT'] = config.product

  arguments = _parse_arguments(argv)

  bench_list = arguments.bench
  if not bench_list:
    bench_list = config.bench_list

  compiler = arguments.compiler_dir
  build_os = arguments.build_os
  llvm_version = arguments.llvm_prebuilts_version
  cflags = arguments.cflags
  ldflags = arguments.ldflags
  iterations = arguments.iterations
  serials = arguments.serials
  remote = arguments.remote
  frequency = arguments.frequency
  mode = arguments.mode
  keep = arguments.keep

  # Clear old logs every time before running the script.
  clear_logs()

  if keep == 'False':
    clear_results()

  # Set the test mode and CPU frequency of the DUT.
  set_device(serials, remote, frequency)

  test = arguments.test
  # If a test configuration file has been given, use the build settings
  # in the configuration file and run the test.
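  # A minimal sketch of such a configuration file (INI format; the section
  # name and the values are illustrative, while the option names match the
  # ones read below):
  #
  #   [setting0]
  #   bench = Hwui
  #   compiler = /path/to/clang/bin
  #   build_os = linux-x86
  #   llvm_version =
  #   cflags = -O3
  #   ldflags =
  #   iterations = 2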
  if test:
    test_config = ConfigParser.ConfigParser(allow_no_value=True)
    if not test_config.read(test):
      logging.error('Error while reading from building '
                    'configuration file %s.', test)
      raise RuntimeError('Error while reading configuration file %s.' % test)

    for setting_no, section in enumerate(test_config.sections()):
      bench = test_config.get(section, 'bench')
      compiler = [test_config.get(section, 'compiler')]
      build_os = [test_config.get(section, 'build_os')]
      llvm_version = [test_config.get(section, 'llvm_version')]
      cflags = [test_config.get(section, 'cflags')]
      ldflags = [test_config.get(section, 'ldflags')]

      # Set iterations from the test_config file; if not present, use the one
      # from the command line.
      it = test_config.get(section, 'iterations')
      if not it:
        it = iterations
      it = int(it)

      # Build the benchmark for this single test configuration.
      build_bench(0, bench, compiler, llvm_version, build_os, cflags, ldflags)

      test_bench(bench, setting_no, it, serials, remote, mode)

      gen_json(bench, setting_no, it, serials)

    for bench in config.bench_list:
      infile = os.path.join(config.bench_suite_dir, bench + '.json')
      if os.path.exists(infile):
        outfile = os.path.join(config.bench_suite_dir, bench + '_report')
        gen_crosperf(infile, outfile)

    # Stop the script if only a config file is provided.
    return 0

  # If no configuration file is specified, continue running.
  # Check whether the counts of the setting arguments are ambiguous.
  setting_count = check_count(compiler, llvm_version, build_os, cflags, ldflags)

  for bench in bench_list:
    logging.info('Start building and running benchmark: [%s]', bench)
    # Run the script for each toolchain setting.
    for setting_no in range(setting_count):
      build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
                  ldflags)

      # Run the autotest script for the benchmark test on the device.
      test_bench(bench, setting_no, iterations, serials, remote, mode)

      gen_json(bench, setting_no, iterations, serials)

    infile = os.path.join(config.bench_suite_dir, bench + '.json')
    outfile = os.path.join(config.bench_suite_dir, bench + '_report')
    gen_crosperf(infile, outfile)


if __name__ == '__main__':
  main(sys.argv[1:])