#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import collections
import logging
import optparse
import os
import shutil
import signal
import sys
import threading

from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import command_option_parser
from pylib.utils import report_results
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(option_parser):
  """Adds all common options to |option_parser|."""

  group = optparse.OptionGroup(option_parser, 'Common Options')
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  group.add_option('--debug', action='store_const', const='Debug',
                   dest='build_type', default=default_build_type,
                   help=('If set, run test suites under out/Debug. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('--release', action='store_const',
                   const='Release', dest='build_type',
                   help=('If set, run test suites under out/Release.'
                         ' Default is env var BUILDTYPE or Debug.'))
  group.add_option('-c', dest='cleanup_test_files',
                   help='Clean up test files on the device after the run',
                   action='store_true')
  group.add_option('--num_retries', dest='num_retries', type='int',
                   default=2,
                   help=('Number of retries for a test before '
                         'giving up.'))
  group.add_option('-v',
                   '--verbose',
                   dest='verbose_count',
                   default=0,
                   action='count',
                   help='Verbose level (multiple times for more)')
  group.add_option('--tool',
                   dest='tool',
                   help=('Run the test under a tool '
                         '(use --tool help to list them)'))
  group.add_option('--flakiness-dashboard-server',
                   dest='flakiness_dashboard_server',
                   help=('Address of the server that is hosting the '
                         'Chrome for Android flakiness dashboard.'))
  group.add_option('--skip-deps-push', dest='push_deps',
                   action='store_false', default=True,
                   help=('Do not push dependencies to the device. '
                         'Use this at your own risk to speed up test '
                         'execution on a local machine.'))
  group.add_option('-d', '--device', dest='test_device',
                   help=('Target device for the test suite '
                         'to run on.'))
  option_parser.add_option_group(group)


def ProcessCommonOptions(options):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(options.verbose_count)
  constants.SetBuildType(options.build_type)


def AddGTestOptions(option_parser):
  """Adds gtest options to |option_parser|."""

  option_parser.usage = '%prog gtest [options]'
  option_parser.commands_dict = {}
  option_parser.example = '%prog gtest -s base_unittests'

  # TODO(gkanwar): Make this option required
  option_parser.add_option('-s', '--suite', dest='suite_name',
                           help=('Executable name of the test suite to run '
                                 '(use -s help to list them).'))
  option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
                           dest='test_filter',
                           help='googletest-style filter string.')
  option_parser.add_option('--gtest_also_run_disabled_tests',
                           '--gtest-also-run-disabled-tests',
                           dest='run_disabled', action='store_true',
                           help='Also run disabled tests if applicable.')
  option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
                           default='',
                           help='Additional arguments to pass to the test.')
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           type='int',
                           default=60)
  option_parser.add_option('--isolate_file_path',
                           '--isolate-file-path',
                           dest='isolate_file_path',
                           help='.isolate file path to override the default '
                                'path')
  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddCommonOptions(option_parser)


def AddLinkerTestOptions(option_parser):
  option_parser.usage = '%prog linker'
  option_parser.commands_dict = {}
  option_parser.example = '%prog linker'

  option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
                           help='googletest-style filter string.')
  AddCommonOptions(option_parser)


def ProcessGTestOptions(options):
  """Intercept test suite help to list test suites.

  Args:
    options: Command line options.
  """
  if options.suite_name == 'help':
    print 'Available test suites are:'
    for test_suite in (gtest_config.STABLE_TEST_SUITES +
                       gtest_config.EXPERIMENTAL_TEST_SUITES):
      print test_suite
    sys.exit(0)

  # Convert to a list, assuming all test suites if nothing was specified.
  # TODO(gkanwar): Require having a test suite
  if options.suite_name:
    options.suite_name = [options.suite_name]
  else:
    options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]


def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""

  option_parser.add_option('-f', '--test-filter', dest='test_filter',
                           help=('Test filter (if not fully qualified, '
                                 'will run all matches).'))
  option_parser.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  option_parser.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_parser.add_option(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  option_parser.add_option(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option(
      '--official-build', action='store_true', help='Run official build tests.')
  option_parser.add_option(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))
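  # Illustrative (hypothetical values): "--test-data foo:bar" copies
  # <build dir>/bar to <device data dir>/foo before the tests run.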


def ProcessJavaTestOptions(options):
  """Processes options/arguments and populates |options| with defaults."""

  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
    options.annotations = []
  else:
    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                           'EnormousTest', 'IntegrationTest']

  if options.exclude_annotation_str:
    options.exclude_annotations = options.exclude_annotation_str.split(',')
  else:
    options.exclude_annotations = []


def AddInstrumentationTestOptions(option_parser):
  """Adds Instrumentation test options to |option_parser|."""

  option_parser.usage = '%prog instrumentation [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog instrumentation '
                           '--test-apk=ChromeShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  option_parser.add_option('-j', '--java-only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python-only', action='store_true',
                           default=False,
                           help='Run only the host-driven tests.')
  option_parser.add_option('--host-driven-root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           action='store_true',
                           help='Wait for debugger.')
  option_parser.add_option(
      '--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest").'))
  option_parser.add_option('--coverage-dir',
                           help=('Directory in which to place all generated '
                                 'EMMA coverage files.'))
  option_parser.add_option('--device-flags', dest='device_flags', default='',
                           help='The relative filepath to a file containing '
                                'command-line flags to set on the device')


def ProcessInstrumentationOptions(options, error_func):
248  """Processes options/arguments and populate |options| with defaults.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant to
    instrumentation tests.
  """

  ProcessJavaTestOptions(options)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  if not options.host_driven_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')


  options.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % options.test_apk)
  options.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % options.test_apk)
  options.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(options.test_apk_path))
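  # e.g. an APK path ending in ChromeShellTest.apk (illustrative) yields a
  # support APK path ending in ChromeShellTestSupport.apk.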

  options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path)

  return instrumentation_test_options.InstrumentationOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.wait_for_debugger,
      options.coverage_dir,
      options.test_apk,
      options.test_apk_path,
      options.test_apk_jar_path,
      options.test_runner,
      options.test_support_apk_path,
      options.device_flags
      )


def AddUIAutomatorTestOptions(option_parser):
  """Adds UI Automator test options to |option_parser|."""

  option_parser.usage = '%prog uiautomator [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests'
      ' --package=chrome_shell')
  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)


def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(options)

  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')
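  # e.g. chrome_shell_uiautomator_tests.dex.jar (from the example above) maps
  # to chrome_shell_uiautomator_tests_java.jar.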

  return uiautomator_test_options.UIAutomatorOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.uiautomator_jar,
      options.uiautomator_info_jar,
      options.package)


def AddJUnitTestOptions(option_parser):
  """Adds junit test options to |option_parser|."""
  option_parser.usage = '%prog junit -s [test suite name]'
  option_parser.commands_dict = {}

  option_parser.add_option(
      '-s', '--test-suite', dest='test_suite',
      help=('JUnit test suite to run.'))
  option_parser.add_option(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  option_parser.add_option(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  option_parser.add_option(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  option_parser.add_option(
      '--sdk-version', dest='sdk_version', type="int",
      help='The Android SDK version.')
  AddCommonOptions(option_parser)


def ProcessJUnitTestOptions(options, error_func):
  """Processes all JUnit test options."""
  if not options.test_suite:
    error_func('No test suite specified.')
  return options


def AddMonkeyTestOptions(option_parser):
  """Adds monkey test options to |option_parser|."""

  option_parser.usage = '%prog monkey [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog monkey --package=chrome_shell')

  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--event-count', default=10000, type='int',
      help='Number of events to generate [default: %default].')
  option_parser.add_option(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  option_parser.add_option(
      '--throttle', default=100, type='int',
      help='Delay between events (ms) [default: %default].')
  option_parser.add_option(
      '--seed', type='int',
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  option_parser.add_option(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim '
            '[default: "%default"].'))

  AddCommonOptions(option_parser)


def ProcessMonkeyTestOptions(options, error_func):
  """Processes all monkey test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  category = options.category
  if category:
    category = options.category.split(',')

  return monkey_test_options.MonkeyOptions(
      options.verbose_count,
      options.package,
      options.event_count,
      category,
      options.throttle,
      options.seed,
      options.extra_args)


def AddPerfTestOptions(option_parser):
  """Adds perf test options to |option_parser|."""

  option_parser.usage = '%prog perf [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog perf '
                           '[--single-step -- command args] or '
                           '[--steps perf_steps.json] or '
                           '[--print-step step]')

  option_parser.add_option(
      '--single-step',
      action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  option_parser.add_option(
      '--steps',
      help='JSON file containing the list of commands to run.')
  option_parser.add_option(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have their exit codes ignored.'))
  option_parser.add_option(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  option_parser.add_option(
      '--print-step',
      help='The name of a previously executed perf step to print.')
  option_parser.add_option(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  option_parser.add_option(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  option_parser.add_option(
      '--dry-run',
      action='store_true',
      help='Just print the steps without executing.')
  AddCommonOptions(option_parser)


def ProcessPerfTestOptions(options, args, error_func):
  """Processes all perf test options.

  Args:
    options: optparse.Options object.
    args: List of extra args from optparse.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # Exactly one of steps, print_step or single_step must be provided.
  count = len(filter(None,
                     [options.steps, options.print_step, options.single_step]))
  if count != 1:
    error_func('Please specify one of: --steps, --print-step, --single-step.')
  single_step = None
  if options.single_step:
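    # The first two entries of args appear to be the script name and the
    # 'perf' command (cf. the len(args) checks in RunTestsCommand); the rest,
    # i.e. whatever follows '--' in the usage example above, is joined into
    # the single-step command line.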
    single_step = ' '.join(args[2:])
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.output_json_list,
      options.print_step, options.no_timeout, options.test_filter,
      options.dry_run, single_step)


def _RunGTests(options, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  ProcessGTestOptions(options)

  exit_code = 0
  for suite_name in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # the gtest command.
    gtest_options = gtest_test_options.GTestOptions(
        options.tool,
        options.cleanup_test_files,
        options.push_deps,
        options.test_filter,
        options.run_disabled,
        options.test_arguments,
        options.timeout,
        options.isolate_file_path,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=options.flakiness_dashboard_server)

  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(options, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  return exit_code


def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  if len(devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=options.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if options.device_flags:
    options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                        options.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunJUnitTests(options, error_func):
  """Subcommand of RunTestsCommand which runs junit tests."""
  junit_options = ProcessJUnitTestOptions(options, error_func)
  runner_factory, tests = junit_setup.Setup(junit_options)
  _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  return exit_code


def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code


def _RunPerfTests(options, args, error_func):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(options, args, error_func)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but has no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual test exit codes
  # are returned on the print_step stage.
  return 0


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = []

  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)


def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating the exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """

  # Check for extra arguments
  if len(args) > 2 and command != 'perf':
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE
  if command == 'perf':
    if ((options.single_step and len(args) <= 2) or
        (not options.single_step and len(args) > 2)):
      option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
      return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options)

  devices = _GetAttachedDevices(options.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(options, devices)
  elif command == 'linker':
    return _RunLinkerTests(options, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(options, option_parser.error, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(options, option_parser.error, devices)
  elif command == 'junit':
    return _RunJUnitTests(options, option_parser.error)
  elif command == 'monkey':
    return _RunMonkeyTests(options, option_parser.error, devices)
  elif command == 'perf':
    return _RunPerfTests(options, args, option_parser.error)
  else:
    raise Exception('Unknown test type.')


def HelpCommand(command, _options, args, option_parser):
  """Display help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    _options: optparse options dictionary. Unused.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating the exit code.
824  """
825  # If we don't have any args, display overall help
826  if len(args) < 3:
827    option_parser.print_help()
828    return 0
829  # If we have too many args, print an error
830  if len(args) > 3:
831    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
832    return constants.ERROR_EXIT_CODE
833
834  command = args[2]
835
836  if command not in VALID_COMMANDS:
837    option_parser.error('Unrecognized command.')
838
839  # Treat the help command as a special case. We don't care about showing a
840  # specific help page for itself.
841  if command == 'help':
842    option_parser.print_help()
843    return 0
844
845  VALID_COMMANDS[command].add_options_func(option_parser)
846  option_parser.usage = '%prog ' + command + ' [options]'
847  option_parser.commands_dict = {}
848  option_parser.print_help()
849
850  return 0
851
852
853# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
854# syntax is a bit prettier. The tuple is two functions: (add options, run
855# command).
856CommandFunctionTuple = collections.namedtuple(
857    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
858VALID_COMMANDS = {
859    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
860    'instrumentation': CommandFunctionTuple(
861        AddInstrumentationTestOptions, RunTestsCommand),
862    'uiautomator': CommandFunctionTuple(
863        AddUIAutomatorTestOptions, RunTestsCommand),
864    'junit': CommandFunctionTuple(
865        AddJUnitTestOptions, RunTestsCommand),
866    'monkey': CommandFunctionTuple(
867        AddMonkeyTestOptions, RunTestsCommand),
868    'perf': CommandFunctionTuple(
869        AddPerfTestOptions, RunTestsCommand),
870    'linker': CommandFunctionTuple(
871        AddLinkerTestOptions, RunTestsCommand),
872    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
873    }
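
# A rough sketch of how these tuples get consumed. This is illustrative only:
# the real plumbing lives in command_option_parser.ParseAndExecute (see main()
# below), and the names here are assumptions, not its implementation.
#
#   def _Dispatch(argv):
#     command = argv[1] if len(argv) > 1 else 'help'
#     parser = command_option_parser.CommandOptionParser(
#         commands_dict=VALID_COMMANDS)
#     VALID_COMMANDS[command].add_options_func(parser)
#     options, args = parser.parse_args(argv)
#     return VALID_COMMANDS[command].run_command_func(
#         command, options, args, parser)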


def DumpThreadStacks(_signal, _frame):
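  # Registered in main() as the SIGUSR1 handler, so a hung run can be asked to
  # log the stack of every live thread with `kill -USR1 <pid>`.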
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)
  option_parser = command_option_parser.CommandOptionParser(
      commands_dict=VALID_COMMANDS)
  return command_option_parser.ParseAndExecute(option_parser)


if __name__ == '__main__':
  sys.exit(main())