# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>

#pylint: disable=C0111

import fcntl, json, os, re, sys, shutil, stat, tempfile, time, traceback
import logging

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error


class base_test(object):
    preserve_srcdir = False
    network_destabilizing = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # cros; we should restore it in the near term. Once this is re-enabled,
        # import getpass again. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False


    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        new_dict = {}
        for key, value in dictionary.iteritems():
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict


    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None,
                          replacement='_', replace_existing_values=False):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium based
                tests (in particular telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will be subsequently displayed on the chrome
                perf dashboard. This allows multiple metrics to be grouped
                together on the same graph. Defaults to None, indicating that
                the perf value should be displayed individually on a separate
                graph.
        @param replacement: String to replace illegal characters in
                |description| and |units| with.
        @param replace_existing_values: A boolean indicating whether or not a
                newly added perf value should replace existing perf values.
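
        Example (illustrative only; the metric names and values below are
        hypothetical):

            self.output_perf_value('page_load_time', 123.4, units='msec',
                                   higher_is_better=False)
            self.output_perf_value('scrolling-frame-rate', [59.9, 60.1, 60.0],
                                   units='fps', higher_is_better=True,
                                   graph='smoothness')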
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        charts = {}
        output_file = os.path.join(self.resultsdir, 'results-chart.json')
        if os.path.isfile(output_file):
            with open(output_file, 'r') as fp:
                contents = fp.read()
                if contents:
                    charts = json.loads(contents)

        if graph:
            first_level = graph
            second_level = description
        else:
            first_level = description
            second_level = 'summary'

        direction = 'up' if higher_is_better else 'down'

        # All input should be numeric, but at times strings representing
        # numbers are logged, so attempt to convert them to numbers. If a
        # non-numeric string is logged, an exception will be thrown.
        if isinstance(value, list):
            value = map(float, value)
        else:
            value = float(value)

        result_type = 'scalar'
        value_key = 'value'
        result_value = value

        # The chart json spec go/telemetry-json differentiates between a single
        # value and a list of values. Lists of values get extra processing in
        # the chromeperf dashboard (mean, standard deviation, etc.).
        # Tests can log one or more values for the same metric; to adhere
        # strictly to the specification, the first value logged is a scalar,
        # but if another value is logged the results become a list of scalars.
        # TODO: Figure out whether it would make any difference to always use a
        # list of scalars, even when there is just one item in the list.
        if isinstance(value, list):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if first_level in charts and second_level in charts[first_level]:
                if 'values' in charts[first_level][second_level]:
                    result_value = charts[first_level][second_level]['values']
                elif 'value' in charts[first_level][second_level]:
                    result_value = [charts[first_level][second_level]['value']]
                if replace_existing_values:
                    result_value = value
                else:
                    result_value.extend(value)
            else:
                result_value = value
        elif (first_level in charts and second_level in charts[first_level] and
              not replace_existing_values):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if 'values' in charts[first_level][second_level]:
                result_value = charts[first_level][second_level]['values']
                result_value.append(value)
            else:
                result_value = [charts[first_level][second_level]['value'],
                                value]

        test_data = {
            second_level: {
                'type': result_type,
                'units': units,
                value_key: result_value,
                'improvement_direction': direction
            }
        }

        if first_level in charts:
            charts[first_level].update(test_data)
        else:
            charts.update({first_level: test_data})

        with open(output_file, 'w') as fp:
            fp.write(json.dumps(charts, indent=2))


    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


    def write_iteration_keyval(self, attr_dict, perf_dict):
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr': attr_dict, 'perf': perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        print >> open(keyval_path, "a"), ""


    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)


    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)


    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
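
        Example (illustrative; the hook below is hypothetical):

            def reset_state(test):
                logging.info('about to start iteration %d', test.iteration)

            self.register_before_iteration_hook(reset_state)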
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration. Hooks are executed starting with the most-
        recently registered, in stack fashion.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
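
        Example (illustrative; the hook below is hypothetical):

            def collect_logs(test):
                logging.info('finished iteration %d of %s', test.iteration,
                             test.tagged_testname)

            self.register_after_iteration_hook(collect_logs)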
        """
        self.after_iteration_hooks.append(iteration_hook)


    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()


    def _call_run_once_with_retry(self, constraints, profile_only,
                                  postprocess_profiled_run, args, dargs):
        """Thin wrapper around _call_run_once that retries unsuccessful tests.

        If the job object's test_retry attribute is > 0, retry any test that
        ran unsuccessfully, up to that many times.
        *Note: this does not completely re-initialize the test; it only
            re-executes code once all the initial job setup (packages,
            sysinfo, etc.) is complete.
        """
        if self.job.test_retry != 0:
            logging.info('Test will be retried a maximum of %d times',
                         self.job.test_retry)

        max_runs = self.job.test_retry
        for retry_run in xrange(0, max_runs+1):
            try:
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                break
            except error.TestFailRetry as err:
                if retry_run == max_runs:
                    raise
                self.job.record('INFO', None, None, 'Run %s failed with %s' % (
                        retry_run, err))
        if retry_run > 0:
            self.write_test_keyval({'test_retries_before_success': retry_run})


    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # execute iteration hooks
        logging.debug('starting before_iteration_hooks')
        for hook in self.before_iteration_hooks:
            hook(self)
        logging.debug('before_iteration_hooks completed')

        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\n%r', args)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            logging.debug('starting after_iteration_hooks')
            for hook in reversed(self.after_iteration_hooks):
                hook(self)
            logging.debug('after_iteration_hooks completed')


    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, so it can cooperate with the profiling
        infrastructure. For other tests, you can just override the default
        implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function for a number of times large enough to cover the
            minimum test length.

        @param iterations: The number of times to run the run_once function.
            This parameter is incompatible with test_length and will be
            silently ignored if you specify both.

        @param profile_only: If True, run the iterations with profilers
            enabled. If False, run the iterations and then one more with
            profiling, if profilers are enabled. If None, default to the value
            of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
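
        Example (illustrative; tests normally reach execute() indirectly
        through job.run_test(), and 'mytest' is a hypothetical test name):

            job.run_test('mytest', iterations=3)
            job.run_test('mytest', test_length=60,
                         constraints=['throughput >= 100'])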
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in xrange(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()


    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # Priority to the run_once() argument over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()


    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it; this will be called before any
        run_once() call, including the profiling run (where it is called
        before starting the profilers).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run, where it
        is called after stopping the profilers).
        """
        pass


    @staticmethod
    def _make_writable_to_others(directory):
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            if self.network_destabilizing:
                self.job.disable_warnings("NETWORK")

            # write out the test attributes into a keyval
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it so the actual time of the error is
                # known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise exception after running cleanup, reporting crash,
                    # and restoring job's logging, even if the first two
                    # actions fail.
                    self.job.logging.restore()
                    try:
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)
        else:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")


    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: Url of new test.
        @param tag: Tag added to test name.
        @param args: Args for subtest.
        @param dargs: Dictionary with args for subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If true execute one profiled run.
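
        Example (illustrative; 'sleeptest' is a hypothetical subtest name):

            self.runsubtest('sleeptest', tag='nested', seconds=1)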
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, master_testpath=test_basepath,
                                 *args, **dargs)


def _get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to choose arguments for.

    @return: A tuple of parameters accepted by the function.
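
    Example (illustrative):

        def f(a, b, *args, **kwargs):
            pass

        _get_nonstar_args(f)  # -> ('a', 'b')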
    """
    return func.func_code.co_varnames[:func.func_code.co_argcount]


def _cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
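
    Example (illustrative):

        def f(a, b=0):
            return a + b

        _cherry_pick_args(f, (1, 2), {'a': 3, 'c': 4})
        # -> ((), {'a': 3}): f accepts neither *args nor **kwargs, so the
        # positional args are dropped and only keyword args it names are kept.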
    """
    # Cherry pick args:
    if func.func_code.co_flags & 0x04:
        # func accepts *args, so return the entire args.
        p_args = args
    else:
        p_args = ()

    # Cherry pick dargs:
    if func.func_code.co_flags & 0x08:
        # func accepts **dargs, so return the entire dargs.
        p_dargs = dargs
    else:
        # Only return the keyword arguments that func accepts.
        p_dargs = {}
        for param in _get_nonstar_args(func):
            if param in dargs:
                p_dargs[param] = dargs[param]

    return p_args, p_dargs


def _cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    p_args, p_dargs = _cherry_pick_args(func, args, dargs)
    return func(*p_args, **p_dargs)


def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
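
    Example (illustrative):

        def setup(threads=1):
            pass

        _validate_args((), {'threads': 4}, setup)  # accepted
        _validate_args((), {'bogus': 1}, setup)    # raises error.AutotestError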
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.func_code.co_flags
        all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]

    # Positional args are currently not supported.
    if len(args) > 0:
        # Current implementation doesn't allow the use of args.
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check that each given darg is accepted by at least one of the callables.
    if len(dargs) > 0:
        if not all_co_flags & 0x08:
            # no func accepts **dargs, so:
            for param in dargs:
                if not param in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)


def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing create it and add
    # an empty __init__.py so that sub-directories are
    # considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = file(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url=os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py',
               os.path.join(group_dir, name, '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception, e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


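# Example usage of runtest() (illustrative sketch; 'sleeptest' and the hook
# below are hypothetical):
#
#     def log_iteration(test):
#         logging.info('finished iteration %d of %s',
#                      test.iteration, test.tagged_testname)
#
#     runtest(job, 'sleeptest', tag='smoke', args=(), dargs={'seconds': 1},
#             after_iteration_hook=log_iteration)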
def runtest(job, url, tag, args, dargs,
            local_namespace={}, global_namespace={},
            before_test_hook=None, after_test_hook=None,
            before_iteration_hook=None, after_iteration_hook=None):
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        mytest.success = False
        if before_test_hook:
            before_test_hook(mytest)

        # use the register_*_iteration_hook methods to register the
        # passed-in hooks
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook:
            after_test_hook(mytest)
        shutil.rmtree(mytest.tmpdir, ignore_errors=True)