# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>

#pylint: disable=C0111

import fcntl
import json
import logging
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import traceback

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import utils as client_utils

try:
    from chromite.lib import metrics
except ImportError:
    metrics = client_utils.metrics_mock

class base_test(object):
    preserve_srcdir = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # the cros crash handler; we should restore it in the near term.
        # Once this is re-enabled, import getpass. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False


    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        new_dict = {}
        for key, value in dictionary.iteritems():
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict

    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None,
                          replacement='_', replace_existing_values=False,
                          resultsdir=None):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be at most 256 characters long and may only contain letters,
                numbers, periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be at most 32 characters long and
                may only contain letters, numbers, periods, dashes, and
                underscores. For example: "msec", "fps", "score",
                "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium-based
                tests (in particular, telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will subsequently be displayed on the chrome
                perf dashboard. This allows multiple metrics to be grouped
                together on the same graph. Defaults to None, indicating that
                the perf value should be displayed individually on a separate
                graph.
        @param replacement: String to replace illegal characters in
                |description| and |units| with.
        @param replace_existing_values: A boolean indicating whether or not a
                newly added perf value should replace existing perf values.
        @param resultsdir: An optional path to specify a custom output
                directory.
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal, replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        charts = {}
        if not resultsdir:
            resultsdir = self.resultsdir
        if not os.path.exists(resultsdir):
            os.makedirs(resultsdir)
        output_file = os.path.join(resultsdir, 'results-chart.json')
        if os.path.isfile(output_file):
            with open(output_file, 'r') as fp:
                contents = fp.read()
                if contents:
                    charts = json.loads(contents)

        if graph:
            first_level = graph
            second_level = description
        else:
            first_level = description
            second_level = 'summary'

        direction = 'up' if higher_is_better else 'down'

        # All input should be numeric, but at times strings representing
        # numbers are logged; attempt to convert them to numbers. If a
        # non-numeric string is logged, an exception will be thrown.
        if isinstance(value, list):
            value = map(float, value)
        else:
            value = float(value)

        result_type = 'scalar'
        value_key = 'value'
        result_value = value

        # The chart json spec (go/telemetry-json) differentiates between a
        # single value and a list of values. Lists of values get extra
        # processing in the chromeperf dashboard (mean, standard deviation,
        # etc.). Tests can log one or more values for the same metric; to
        # adhere strictly to the specification, the first value logged is a
        # scalar, but if another value is logged the result becomes a list of
        # scalar values.
        # TODO: Figure out if there would be any difference in always using a
        # list of scalar values, even if there is just one item in the list.
        if isinstance(value, list):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if first_level in charts and second_level in charts[first_level]:
                if 'values' in charts[first_level][second_level]:
                    result_value = charts[first_level][second_level]['values']
                elif 'value' in charts[first_level][second_level]:
                    result_value = [charts[first_level][second_level]['value']]
                if replace_existing_values:
                    result_value = value
                else:
                    result_value.extend(value)
            else:
                result_value = value
        elif (first_level in charts and second_level in charts[first_level] and
              not replace_existing_values):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if 'values' in charts[first_level][second_level]:
                result_value = charts[first_level][second_level]['values']
                result_value.append(value)
            else:
                result_value = [charts[first_level][second_level]['value'],
                                value]

        test_data = {
            second_level: {
                'type': result_type,
                'units': units,
                value_key: result_value,
                'improvement_direction': direction
            }
        }

        if first_level in charts:
            charts[first_level].update(test_data)
        else:
            charts.update({first_level: test_data})

        with open(output_file, 'w') as fp:
            fp.write(json.dumps(charts, indent=2))

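    # Example (illustrative sketch, not part of the upstream module): a test's
    # run_once() might record a single scalar and a list of samples like this.
    # The metric names and values below are made up for illustration only.
    #
    #   def run_once(self):
    #       self.output_perf_value('page_load_time', 123.4, units='msec',
    #                              higher_is_better=False)
    #       self.output_perf_value('frame_rate', [59.1, 60.0, 58.7],
    #                              units='fps', higher_is_better=True,
    #                              graph='smoothness')
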
    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


    def write_iteration_keyval(self, attr_dict, perf_dict):
        # Append the dictionaries before the {perf} and {attr} type tags are
        # added to their keys.
        self._keyvals.append({'attr': attr_dict, 'perf': perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        print >> open(keyval_path, "a"), ""

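    # Example (illustrative sketch, not part of the upstream module): tests
    # typically record per-iteration results from postprocess_iteration().
    # The keys and values below are assumptions chosen for illustration.
    #
    #   def postprocess_iteration(self):
    #       self.write_perf_keyval({'throughput_mb_per_sec': 42.5})
    #       self.write_attr_keyval({'kernel_version': '5.4'})
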
    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # Create a dict from the keyvals suitable as an environment for eval.
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # Evaluate each constraint using the current keyvals.
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # Keep track of the errors for each iteration.
        self.failed_constraints.append(failures)

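    # Example (illustrative sketch, not part of the upstream module): a
    # constraint is a Python expression evaluated against the perf keyvals of
    # the latest iteration. With the hypothetical keyval shown above, a job
    # could pass something like:
    #
    #   job.run_test('mytest', constraints=['throughput_mb_per_sec > 40'])
    #
    # Any constraint that evaluates to False (or fails to evaluate) is later
    # reported by process_failed_constraints() as a TestFail.
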
    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)

    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration. Hooks are executed starting with the most
        recently registered, in stack (LIFO) fashion.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)

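    # Example (illustrative sketch, not part of the upstream module): a hook
    # receives the test object, so it can inspect attributes such as
    # iteration or resultsdir. The hook below is hypothetical.
    #
    #   def _log_iteration(test_instance):
    #       logging.info('finished iteration %d of %s',
    #                    test_instance.iteration,
    #                    test_instance.tagged_testname)
    #
    #   mytest.register_after_iteration_hook(_log_iteration)
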
    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()

    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # Execute iteration hooks.
        if not self.job.fast:
            logging.debug('Starting before_iteration_hooks for %s',
                          self.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_iteration_hook_duration'):
                for hook in self.before_iteration_hooks:
                    hook(self)
            logging.debug('before_iteration_hooks completed')

        finished = False
        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\nargs: %r\nkwargs: %r', args, dargs)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
            finished = True
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            if not finished or not self.job.fast:
                logging.debug('Starting after_iteration_hooks for %s',
                              self.tagged_testname)
                with metrics.SecondsTimer(
                        'chromeos/autotest/job/after_iteration_hook_duration'):
                    for hook in reversed(self.after_iteration_hooks):
                        hook(self)
                logging.debug('after_iteration_hooks completed')

    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, to cope with the profiling infrastructure. For
        other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function enough times to cover the minimum test length.

        @param iterations: The number of times to run the run_once function.
            This parameter is incompatible with test_length and will be
            silently ignored if you specify both.

        @param profile_only: If True, run all iterations with profilers
            enabled. If False, run all iterations and then one extra run with
            profiling, if profilers are enabled. If None, default to the value
            of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in xrange(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc.
        self.postprocess()
        self.process_failed_constraints()

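    # Example (illustrative sketch, not part of the upstream module): execute()
    # is normally reached through job.run_test(), which forwards its keyword
    # arguments. The test name below is hypothetical.
    #
    #   job.run_test('mytest', iterations=3)     # three run_once() calls
    #   job.run_test('mytest', test_length=60)   # repeat run_once() until at
    #                                            # least 60 seconds have passed
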
    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary.
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # The run_once() argument takes priority over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()

    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it; called before every run_once()
        call, including the profiling run (where it is called before starting
        the profilers).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once() call, including from a profiled run
        (where it is called after stopping the profilers).
        """
        pass

    @staticmethod
    def _make_writable_to_others(directory):
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            # Write out the test attributes into a keyval.
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # Call self.warmup, cherry-picking the arguments it accepts
                # and translating exceptions if needed.
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # Pull in any non-* and non-** args from self.execute.
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it so the actual time of error is
                # known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise the exception after running cleanup, reporting the
                    # crash, and restoring the job's logging, even if the
                    # first two actions fail.
                    self.job.logging.restore()
                    try:
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)

    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: Url of the new test.
        @param tag: Tag added to the test name.
        @param args: Args for the subtest.
        @param dargs: Dictionary with args for the subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If True, execute one profiled run.
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, master_testpath=test_basepath,
                                 *args, **dargs)


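# Example (illustrative sketch, not part of the upstream module): a concrete
# test normally lives in tests/<test>/<test>.py and subclasses this class
# (typically via the client-side test.test wrapper), implementing run_once().
# The class name and metric below are assumptions chosen for illustration.
#
#   class hello_world(test.test):
#       version = 1
#
#       def run_once(self):
#           self.output_perf_value('sample_metric', 42.0, units='score',
#                                  higher_is_better=True)
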
def _get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to choose arguments for.

    @return: A tuple of parameters accepted by the function.
    """
    return func.func_code.co_varnames[:func.func_code.co_argcount]


def _cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
    """
    # Cherry pick args:
    if func.func_code.co_flags & 0x04:
        # func accepts *args, so return the entire args.
        p_args = args
    else:
        p_args = ()

    # Cherry pick dargs:
    if func.func_code.co_flags & 0x08:
        # func accepts **dargs, so return the entire dargs.
        p_dargs = dargs
    else:
        # Only return the keyword arguments that func accepts.
        p_dargs = {}
        for param in _get_nonstar_args(func):
            if param in dargs:
                p_dargs[param] = dargs[param]

    return p_args, p_dargs


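# Example (illustrative sketch, not part of the upstream module): given a
# hypothetical function that only accepts 'tag', only that keyword survives
# the cherry pick; unknown keywords are dropped instead of raising TypeError.
#
#   def my_hook(tag):
#       return tag
#
#   _cherry_pick_args(my_hook, (), {'tag': 'smoke', 'iterations': 3})
#   # -> ((), {'tag': 'smoke'})
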
def _cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    p_args, p_dargs = _cherry_pick_args(func, args, dargs)
    return func(*p_args, **p_dargs)


def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.func_code.co_flags
        all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]

    # Check whether the given args belong to at least one of the methods.
    if len(args) > 0:
        # The current implementation doesn't allow the use of args.
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check whether the given dargs belong to at least one of the methods.
    if len(dargs) > 0:
        if not all_co_flags & 0x08:
            # No func accepts **dargs, so:
            for param in dargs:
                if param not in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)


def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed.
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing, create it and add
    # an empty __init__.py so that sub-directories are
    # considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = file(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url=os.path.dirname(url))

    # Create the directory for the test.
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py', os.path.join(group_dir, name,
                                          '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception, e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions.
        raise error.UnhandledTestFail(e)


def runtest(job, url, tag, args, dargs,
            local_namespace={}, global_namespace={},
            before_test_hook=None, after_test_hook=None,
            before_iteration_hook=None, after_iteration_hook=None):
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # If this is not a plain test name, then download and install the
    # specified test.
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir.
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package.
        # The job object may be either a server side job or a client side job.
        # The 'install_pkg' method will be present only if it's a client side
        # job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # Continue as a fallback mechanism and see if the test code
                # already exists on the machine.
                pass

        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        mytest.success = False
        if not job.fast and before_test_hook:
            logging.info('Starting before_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_hook_duration'):
                before_test_hook(mytest)
            logging.info('before_hook completed')

        # Use the iteration-hook registration methods to register the
        # passed-in hooks.
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook and (not mytest.success or not job.fast):
            logging.info('Starting after_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_hook_duration'):
                after_test_hook(mytest)
            logging.info('after_hook completed')

        shutil.rmtree(mytest.tmpdir, ignore_errors=True)