# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import unittest
import logging

from bart.sched.SchedAssert import SchedAssert
from bart.sched.SchedMultiAssert import SchedMultiAssert
from devlib.utils.misc import memoized
import wrapt

from env import TestEnv
from executor import Executor
from trace import Trace


class LisaTest(unittest.TestCase):
    """
    A base class for LISA tests

    This class is intended to be subclassed in order to create automated tests
    for LISA. It sets up the TestEnv and Executor and provides convenience
    methods for making assertions on results.

    Subclasses should provide a test_conf to configure the TestEnv and an
    experiments_conf to configure the Executor.

    Tests whose behaviour depends on target parameters, for example the
    presence of cpufreq governors or the number of CPUs, can override
    _getExperimentsConf to generate target-dependent experiments.

    Example users of this class can be found under LISA's tests/ directory.

    :ivar experiments: List of :class:`Experiment` instances executed for the
                       test. Only available after :meth:`runExperiments` has
                       been called.
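
    A minimal subclass might look like the following sketch (hypothetical
    names; the configuration contents are elided and depend on the target
    and workload)::

        class MyTest(LisaTest):

            test_conf = {...}          # TestEnv configuration
            experiments_conf = {...}   # Executor configuration

            @classmethod
            def setUpClass(cls):
                cls.runExperiments()

            @experiment_test
            def test_tasks_ran(self, experiment, tasks):
                for task in tasks:
                    self.assertIn(task, self.get_end_times(experiment))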
    """

    test_conf = None
    """Override this with a dictionary or JSON path to configure the TestEnv"""

    experiments_conf = None
    """Override this with a dictionary or JSON path to configure the Executor"""

    permitted_fail_pct = 0
    """The percentage of iterations of each test that are permitted to fail"""

    @classmethod
    def _getTestConf(cls):
        if cls.test_conf is None:
            raise NotImplementedError("Override `test_conf` attribute")
        return cls.test_conf

    @classmethod
    def _getExperimentsConf(cls, test_env):
        """
        Get the experiments_conf used to configure the Executor

        This method receives the initialized TestEnv as a parameter, so
        subclasses can override it to configure workloads or target
        configurations in a target-dependent manner. If not overridden, it
        simply returns the experiments_conf attribute.
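
        A hypothetical override sketch, generating experiments based on the
        target (the workload configuration keys shown are placeholders)::

            @classmethod
            def _getExperimentsConf(cls, test_env):
                conf = dict(cls.experiments_conf)
                # e.g. run one task per CPU reported by the target
                n_cpus = len(test_env.target.core_names)
                conf['wloads']['per_cpu_wload']['tasks'] = n_cpus
                return conf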
        """
        if cls.experiments_conf is None:
            raise NotImplementedError("Override `experiments_conf` attribute")
        return cls.experiments_conf

    @classmethod
    def runExperiments(cls):
        """
        Set up logging and trigger running experiments
        """
        cls._log = logging.getLogger('LisaTest')

        cls._log.info('Setting up the test execution engine...')
        test_env = TestEnv(test_conf=cls._getTestConf())

        experiments_conf = cls._getExperimentsConf(test_env)

        if ITERATIONS_FROM_CMDLINE:
            if 'iterations' in experiments_conf:
                cls._log.warning(
                    "Command line overrides iteration count in "
                    "{}'s experiments_conf".format(cls.__name__))
            experiments_conf['iterations'] = ITERATIONS_FROM_CMDLINE

        cls.executor = Executor(test_env, experiments_conf)

        # Alias test and workload configurations
        cls.wloads = cls.executor._experiments_conf["wloads"]
        cls.confs = cls.executor._experiments_conf["confs"]

        # Alias executor objects to make test code less verbose
        cls.te = cls.executor.te
        cls.target = cls.executor.target

        # Execute pre-experiments code defined by the test
        cls._experimentsInit()

        cls._log.info('Running experiments...')
        cls.executor.run()

        cls.experiments = cls.executor.experiments

        # Execute post-experiments code defined by the test
        cls._experimentsFinalize()

    @classmethod
    def _experimentsInit(cls):
        """
        Code executed before running the experiments
        """

    @classmethod
    def _experimentsFinalize(cls):
        """
        Code executed after running the experiments
        """

    @memoized
    def get_sched_assert(self, experiment, task):
        """
        Return a SchedAssert over the task provided
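
        Example (a hypothetical use; getRuntime is part of bart's
        SchedAssert API)::

            sa = self.get_sched_assert(experiment, "task1")
            runtime = sa.getRuntime(window=self.get_window(experiment))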
        """
        return SchedAssert(
            self.get_trace(experiment).ftrace, self.te.topology, execname=task)

    @memoized
    def get_multi_assert(self, experiment, task_filter=""):
        """
        Return a SchedMultiAssert over the tasks whose names contain task_filter

        By default, this includes _all_ the tasks that were executed for the
        experiment.
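
        Example (hypothetical; SchedMultiAssert proxies the SchedAssert
        methods across all matching tasks, keyed by PID)::

            # Duty cycle of every task whose name contains "wkld"
            duty_cycles = self.get_multi_assert(experiment, "wkld").getDutyCycle(
                self.get_window(experiment))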
        """
        tasks = experiment.wload.tasks.keys()
        return SchedMultiAssert(self.get_trace(experiment).ftrace,
                                self.te.topology,
                                [t for t in tasks if task_filter in t])

    def get_trace(self, experiment):
        """
        Return the Trace for the experiment, parsing and caching it on first
        use
        """
        # Cache traces by output directory. A plain attribute name is used
        # here: a double-underscore name would be mangled and defeat the
        # hasattr() check below.
        if not hasattr(self, "_traces"):
            self._traces = {}
        if experiment.out_dir in self._traces:
            return self._traces[experiment.out_dir]

        if ('ftrace' not in experiment.conf['flags']
            or 'ftrace' not in self.test_conf):
            raise ValueError(
                'Tracing not enabled. If this test needs a trace, add "ftrace" '
                'to your test/experiment configuration flags')

        events = self.test_conf['ftrace']['events']
        trace = Trace(self.te.platform, experiment.out_dir, events)

        self._traces[experiment.out_dir] = trace
        return trace

    def get_start_time(self, experiment):
        """
        Get the time at which the experiment workload began executing
        """
        start_times_dict = self.get_multi_assert(experiment).getStartTime()
        return min([t["starttime"] for t in start_times_dict.itervalues()])

    def get_end_time(self, experiment):
        """
        Get the time at which the experiment workload finished executing
        """
        end_times_dict = self.get_multi_assert(experiment).getEndTime()
        return max([t["endtime"] for t in end_times_dict.itervalues()])

    def get_window(self, experiment):
        """
        Get a (start_time, end_time) tuple for the experiment workload
        """
        return (self.get_start_time(experiment), self.get_end_time(experiment))

    def get_end_times(self, experiment):
        """
        Get the time at which each task in the workload finished

        Returned as a dict: {"task_name": finish_time, ...}
        """

        end_times = {}
        ftrace = self.get_trace(experiment).ftrace
        for task in experiment.wload.tasks.keys():
            sched_assert = SchedAssert(ftrace, self.te.topology, execname=task)
            end_times[task] = sched_assert.getEndTime()

        return end_times

    def _dummy_method(self):
        pass

    # In the Python unittest framework, TestCase objects are instantiated with
    # the name of a test method that is going to be run to make assertions. We
    # run our tests using nosetests, which discovers these methods
    # automatically. However, we also want to be able to instantiate LisaTest
    # objects in notebooks without the inconvenience of having to provide a
    # methodName, since we won't need any assertions there. So we override
    # __init__ with a default dummy test method that does nothing.
    def __init__(self, methodName='_dummy_method', *args, **kwargs):
        super(LisaTest, self).__init__(methodName, *args, **kwargs)

@wrapt.decorator
def experiment_test(wrapped_test, instance, args, kwargs):
    """
    Convert a LisaTest test method into one that is automatically called for
    each experiment

    The method will be passed the experiment object and a list of the names of
    tasks that were run as the experiment's workload.
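
    Example (a hypothetical test method on a LisaTest subclass)::

        @experiment_test
        def test_tasks_finished(self, experiment, tasks):
            for task in tasks:
                self.assertIn(task, self.get_end_times(experiment))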
    """
    failures = {}
    for experiment in instance.executor.experiments:
        tasks = experiment.wload.tasks.keys()
        try:
            wrapped_test(experiment, tasks, *args, **kwargs)
        except AssertionError as e:
            trace_relpath = os.path.join(experiment.out_dir, "trace.dat")
            add_msg = "Check trace file: " + os.path.abspath(trace_relpath)
            msg = str(e) + "\n\t" + add_msg

            test_key = (experiment.wload_name, experiment.conf['tag'])
            failures[test_key] = failures.get(test_key, []) + [msg]

    for fails in failures.itervalues():
        iterations = instance.executor.iterations
        fail_pct = 100. * len(fails) / iterations
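        # e.g. 1 failure out of 10 iterations gives fail_pct == 10.0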

        msg = "{} failures from {} iteration(s):\n{}".format(
            len(fails), iterations, '\n'.join(fails))
        if fail_pct > instance.permitted_fail_pct:
            raise AssertionError(msg)
        else:
            instance._log.warning(msg)
            instance._log.warning(
                'ALLOWING due to permitted_fail_pct={}'.format(
                    instance.permitted_fail_pct))


# Prevent nosetests from running experiment_test directly as a test case
experiment_test.__test__ = False

# Allow the user to override the iterations setting from the command
# line. Nosetests does not support this kind of thing, so we use an
# evil hack: the lisa-test shell function takes an --iterations
# argument and exports an environment variable. If the test itself
# specifies an iterations count, we'll later print a warning and
# override it. We do this here in the root scope, rather than in
# runExperiments, so that if the value is invalid we print the error
# immediately instead of going ahead with target setup etc.
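#
# For example (hypothetical invocation), "lisa-test --iterations 10 ..."
# would export LISA_TEST_ITERATIONS=10 before running the tests.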
try:
    ITERATIONS_FROM_CMDLINE = int(
        os.getenv('LISA_TEST_ITERATIONS', '0'))
    if ITERATIONS_FROM_CMDLINE < 0:
        raise ValueError('Cannot be negative')
except ValueError as e:
    raise ValueError("Couldn't read iterations count: {}".format(e))

# vim: set tabstop=4 shiftwidth=4 expandtab :