# Lint as: python2, python3
# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>
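#
# Example (a minimal sketch, assuming the usual client-side subclass
# test.test, which wraps base_test below; the test name is hypothetical):
#
#       from autotest_lib.client.bin import test
#
#       class platform_Example(test.test):
#           version = 1
#
#           def run_once(self):
#               self.output_perf_value('boot_time', 12.3, units='sec',
#                                      higher_is_better=False)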

#pylint: disable=C0111

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import errno
import fcntl
import json
import logging
import os
import re
import shutil
import six
from six.moves import map
from six.moves import range
import stat
import sys
import tempfile
import time
import traceback

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import utils as client_utils

try:
    from chromite.lib import metrics
except ImportError:
    metrics = client_utils.metrics_mock


class base_test(object):
    preserve_srcdir = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # the CrOS crash handler; we should restore it in the near term.
        # Once this is re-enabled, import getpass. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        # The crash_reporter uses this file to determine which test is in
        # progress.
        self.test_in_prog_file = '/run/crash_reporter/test-in-prog'
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False


    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


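    # Example of recording test-level attributes (an illustrative sketch; the
    # key name is hypothetical):
    #
    #     self.write_test_keyval({'CHROME_VERSION': '91.0.4472.77'})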
    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        new_dict = {}
        for key, value in six.iteritems(dictionary):
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict

    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None,
                          replacement='_', replace_existing_values=False,
                          resultsdir=None):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium based
                tests (in particular telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will subsequently be displayed on the chrome
                perf dashboard. This allows multiple metrics to be grouped
                together on the same graph. Defaults to None, indicating that
                the perf value should be displayed individually on a separate
                graph.
        @param replacement: String to replace illegal characters in
                |description| and |units| with.
        @param replace_existing_values: A boolean indicating whether a newly
                added perf value should replace existing perf values.
        @param resultsdir: An optional path to specify a custom output
                directory.
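
        Example (an illustrative sketch; the metric name and values are
        hypothetical):

            self.output_perf_value('page_load_time', [1.23, 1.19, 1.21],
                                   units='sec', higher_is_better=False)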
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal, replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        charts = {}
        if not resultsdir:
            resultsdir = self.resultsdir
        if not os.path.exists(resultsdir):
            os.makedirs(resultsdir)
        output_file = os.path.join(resultsdir, 'results-chart.json')
        if os.path.isfile(output_file):
            with open(output_file, 'r') as fp:
                contents = fp.read()
                if contents:
                    charts = json.loads(contents)

        if graph:
            first_level = graph
            second_level = description
        else:
            first_level = description
            second_level = 'summary'

        direction = 'up' if higher_is_better else 'down'

        # All input should be numeric, but at times strings representing
        # numbers are logged; attempt to convert them to numbers.
        # If a non-numeric string is logged, an exception will be thrown.
        if isinstance(value, list):
            value = list(map(float, value))
        else:
            value = float(value)

        result_type = 'scalar'
        value_key = 'value'
        result_value = value

        # The chart json spec go/telemetry-json differentiates between a single
        # value and a list of values.  Lists of values get extra processing in
        # the chromeperf dashboard (mean, standard deviation, etc.).
        # Tests can log one or more values for the same metric; to adhere
        # strictly to the specification the first value logged is a scalar,
        # but if another value is logged the results become a list of scalars.
        # TODO: Figure out if there would be any difference in always using a
        # list of scalars even if there is just one item in the list.
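        # For reference, the resulting results-chart.json looks roughly like
        # the following sketch (metric name and values are illustrative):
        #
        #     {"page_load_time": {"summary": {
        #         "type": "list_of_scalar_values",
        #         "units": "sec",
        #         "values": [1.23, 1.19],
        #         "improvement_direction": "down"}}}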
        if isinstance(value, list):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if first_level in charts and second_level in charts[first_level]:
                if 'values' in charts[first_level][second_level]:
                    result_value = charts[first_level][second_level]['values']
                elif 'value' in charts[first_level][second_level]:
                    result_value = [charts[first_level][second_level]['value']]
                if replace_existing_values:
                    result_value = value
                else:
                    result_value.extend(value)
            else:
                result_value = value
        elif (first_level in charts and second_level in charts[first_level] and
              not replace_existing_values):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if 'values' in charts[first_level][second_level]:
                result_value = charts[first_level][second_level]['values']
                result_value.append(value)
            else:
                result_value = [charts[first_level][second_level]['value'], value]

        test_data = {
            second_level: {
                 'type': result_type,
                 'units': units,
                 value_key: result_value,
                 'improvement_direction': direction
           }
        }

        if first_level in charts:
            charts[first_level].update(test_data)
        else:
            charts.update({first_level: test_data})

        with open(output_file, 'w') as fp:
            fp.write(json.dumps(charts, indent=2))


    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


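    # Example of recording per-iteration keyvals (an illustrative sketch; the
    # key names are hypothetical):
    #
    #     self.write_iteration_keyval({'kernel_version': '5.4'},
    #                                 {'throughput_mbps': 123.4})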
    def write_iteration_keyval(self, attr_dict, perf_dict):
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        print("", file=open(keyval_path, "a"))


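    # Perf constraints are boolean expressions evaluated against the most
    # recent iteration's perf keyvals, e.g. (illustrative key names):
    #
    #     constraints = ['throughput_mbps >= 100', 'latency_ms < 5']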
    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)


    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)


    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
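
        Example (an illustrative sketch of a valid hook):

            def log_iteration(test_obj):
                logging.info('Starting iteration %d of %s',
                             test_obj.iteration, test_obj.tagged_testname)

            self.register_before_iteration_hook(log_iteration)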
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration. Hooks are executed starting with the most-
        recently registered, in stack fashion.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)


    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()


    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # execute iteration hooks
        if not self.job.fast:
            logging.debug('Starting before_iteration_hooks for %s',
                          self.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_iteration_hook_duration'):
                for hook in self.before_iteration_hooks:
                    hook(self)
            logging.debug('before_iteration_hooks completed')

        finished = False

        # Mark the current test in progress so that crash_reporter can report
        # it in uploaded crashes.
        # if the file already exists, truncate and overwrite.
        # TODO(mutexlox): Determine what to do if the file already exists, which
        # could happen for a few reasons:
        #   * An earlier tast or autotest run crashed before removing the
        #     test-in-prog file. In this case, we would ideally overwrite the
        #     existing test name and _not_ restore it.
        #   * An autotest ran another autotest (e.g. logging_GenerateCrashFiles
        #     runs desktopui_SimpleLogin). In this case, arguably it makes sense
        #     to attribute the crash to logging_GenerateCrashFiles and not to
        #     desktopui_SimpleLogin (or to attribute it to both), since the
        #     context in which it's running is different than if it were run on
        #     its own.
        #   * Every tast test is kicked off by the 'tast' autotest (see
        #     server/site_tests/tast/tast.py), so the file will always be
        #     populated when the tast suite starts running. In this case, we
        #     want to attribute crashes that happen during a specific test to
        #     that test, but if the tast infra causes a crash we should
        #     attribute the crash to it (e.g. to the 'tast.critical-system'
        #     "autotest").  For this case, we should save the contents of the
        #     file before a test and restore it after.
        if ('host' in dargs and hasattr(dargs['host'], 'is_up_fast') and
                dargs['host'].is_up_fast()):
            dargs['host'].run('echo %s > %s' %
                              (self.tagged_testname, self.test_in_prog_file),
                              ignore_status=True)
        else:
            crash_run_dir = os.path.dirname(self.test_in_prog_file)
            try:
                # Only try to create the file if the directory already exists
                # (otherwise, we may not be on a CrOS device)
                if os.path.exists(crash_run_dir):
                    with open(self.test_in_prog_file, 'w') as f:
                        f.write(self.tagged_testname)
            except:  # Broad 'except' because we don't want this to block tests
                logging.warning('failed to write in progress test name')
        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\nargs: %r\nkwargs: %r', args, dargs)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
            finished = True
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            if ('host' in dargs and hasattr(dargs['host'], 'is_up_fast') and
                    dargs['host'].is_up_fast()):
                dargs['host'].run('rm -f %s' % self.test_in_prog_file)
            else:
                try:
                    # Unmark the test as running.
                    os.remove(self.test_in_prog_file)
                except OSError as e:
                    # If something removed it, do nothing--we're in the desired
                    # state (the file is gone). Otherwise, log.
                    if e.errno != errno.ENOENT:
                        logging.warning(
                                "Couldn't remove test-in-prog file: %s",
                                traceback.format_exc())

            if not finished or not self.job.fast:
                logging.debug('Starting after_iteration_hooks for %s',
                              self.tagged_testname)
                with metrics.SecondsTimer(
                        'chromeos/autotest/job/after_iteration_hook_duration'):
                    for hook in reversed(self.after_iteration_hooks):
                        hook(self)
                logging.debug('after_iteration_hooks completed')


    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for the tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function so it works with the profiling infrastructure.
        For other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function as many times as needed to cover the minimum
            test length.

        @param iterations: The number of times to run the run_once function.
            This parameter is incompatible with test_length and is ignored
            if both are specified.

        @param profile_only: If True, run the iterations with profilers
            enabled. If False, run the iterations normally and add one
            profiled run at the end if profilers are present. If None,
            default to the value of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
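
        Example (an illustrative sketch; execute() is normally invoked through
        job.run_test, and the test name and constraint are hypothetical):

            job.run_test('platform_Example', iterations=3,
                         constraints=['throughput_mbps >= 100'])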
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in range(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()


    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # Priority to the run_once() argument over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()


    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it; this will be called before any
        run_once() call, including the profiling run (where it is called
        before starting the profilers).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run when it's
        called after stopping the profilers).
        """
        pass


    @staticmethod
    def _make_writable_to_others(directory):
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            # write out the test attributes into a keyval
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in six.iteritems(dargs):
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                utils.cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = utils.cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(utils.cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = utils.cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in utils.get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = utils.cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it too so the actual time of error is
                # known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            utils.cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise exception after running cleanup, reporting crash,
                    # and restoring job's logging, even if the first two
                    # actions fail.
                    self.job.logging.restore()
                    try:
                        six.reraise(exc_info[0], exc_info[1], exc_info[2])
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        utils.cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception as e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)

    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: Url of the new test.
        @param tag: Tag added to the test name.
        @param args: Args for the subtest.
        @param dargs: Dictionary with args for the subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If true, execute one profiled run.
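
        Example (an illustrative sketch; the subtest name and keyword
        arguments are hypothetical and are forwarded to job.run_test):

            self.runsubtest('desktopui_SimpleLogin', tag='nested',
                            iterations=1)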
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, main_testpath=test_basepath,
                                 *args, **dargs)


def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.__code__.co_flags
        all_varnames += func.__code__.co_varnames[:func.__code__.co_argcount]

    # Check if the given args belong to at least one of the methods below.
    if len(args) > 0:
        # Current implementation doesn't allow the use of args.
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check if the given dargs belong to at least one of the methods below.
    if len(dargs) > 0:
        if not all_co_flags & 0x08:
            # 0x08 is CO_VARKEYWORDS: no func accepts **dargs, so:
            for param in dargs:
                if not param in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)


def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing, create it and add
    # an empty __init__.py so that sub-directories are
    # considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = open(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url = os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py', os.path.join(group_dir, name,
                            '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception as e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


def runtest(job,
            url,
            tag,
            args,
            dargs,
            local_namespace={},
            global_namespace={},
            before_test_hook=None,
            after_test_hook=None,
            before_iteration_hook=None,
            after_iteration_hook=None,
            override_test_in_prog_file=None):
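    # Typical invocation (an illustrative sketch; in practice this is called
    # by the job harness rather than by test code, and the test name is
    # hypothetical):
    #
    #     runtest(job, 'platform_Example', tag='smoke', args=[],
    #             dargs={'iterations': 2})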
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('main_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        if override_test_in_prog_file:
            mytest.test_in_prog_file = override_test_in_prog_file
        mytest.success = False
        if not job.fast and before_test_hook:
            logging.info('Starting before_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_hook_duration'):
                before_test_hook(mytest)
            logging.info('before_hook completed')

        # Use the register_*_iteration_hook methods to register the passed-in
        # hooks.
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook and (not mytest.success or not job.fast):
            logging.info('Starting after_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_hook_duration'):
                after_test_hook(mytest)
            logging.info('after_hook completed')

        shutil.rmtree(mytest.tmpdir, ignore_errors=True)