# DExTer : Debugging Experience Tester
# ~~~~~~   ~         ~~         ~   ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Test tool."""

import math
import os
import csv
import pickle
import shutil

from dex.builder import run_external_build_script
from dex.command.ParseCommand import get_command_infos
from dex.debugger.Debuggers import run_debugger_subprocess
from dex.debugger.DebuggerControllers.DefaultController import DefaultController
from dex.debugger.DebuggerControllers.ConditionalController import ConditionalController
from dex.dextIR.DextIR import DextIR
from dex.heuristic import Heuristic
from dex.tools import TestToolBase
from dex.utils.Exceptions import DebuggerException
from dex.utils.Exceptions import BuildScriptException, HeuristicException
from dex.utils.PrettyOutputBase import Stream
from dex.utils.ReturnCode import ReturnCode
from dex.dextIR import BuilderIR


class TestCase(object):
    def __init__(self, context, name, heuristic, error):
        self.context = context
        self.name = name
        self.heuristic = heuristic
        self.error = error

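    # A failed test is constructed with heuristic=None, so these properties
    # fall back to NaN rather than raising AttributeError.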
    @property
    def penalty(self):
        try:
            return self.heuristic.penalty
        except AttributeError:
            return float('nan')

    @property
    def max_penalty(self):
        try:
            return self.heuristic.max_penalty
        except AttributeError:
            return float('nan')

    @property
    def score(self):
        try:
            return self.heuristic.score
        except AttributeError:
            return float('nan')

    def __str__(self):
        if self.error and self.context.options.verbose:
            verbose_error = str(self.error)
        else:
            verbose_error = ''

        if self.error:
            script_error = (' : {}'.format(
                self.error.script_error.splitlines()[0]) if getattr(
                    self.error, 'script_error', None) else '')

            error = ' [{}{}]'.format(
                str(self.error).splitlines()[0], script_error)
        else:
            error = ''

        try:
            summary = self.heuristic.summary_string
        except AttributeError:
            summary = '<r>nan/nan (nan)</>'
        return '{}: {}{}\n{}'.format(self.name, summary, error, verbose_error)


class Tool(TestToolBase):
    """Run the specified DExTer test(s) with the specified compiler and linker
    options, produce a dextIR file, and print the debugging experience score
    calculated by the DExTer heuristic.
    """

    def __init__(self, *args, **kwargs):
        super(Tool, self).__init__(*args, **kwargs)
        self._test_cases = []

    @property
    def name(self):
        return 'DExTer test'

    def add_tool_arguments(self, parser, defaults):
        parser.add_argument('--fail-lt',
                            type=float,
                            default=0.0, # By default the test always succeeds.
                            help='exit with status FAIL(2) if the test result'
                                ' is less than this value.',
                            metavar='<float>')
        parser.add_argument('--calculate-average',
                            action="store_true",
                            help='calculate the average score of every test run')
        super(Tool, self).add_tool_arguments(parser, defaults)

    def _build_test_case(self):
        """Build an executable from the test source with the given --builder
        script and flags (--cflags, --ldflags) in the working directory.
        Or, if the --binary option has been given, copy the executable provided
        into the working directory and rename it to match the --builder output.
        """

        options = self.context.options
        if options.binary:
            # Copy user's binary into the tmp working directory
            shutil.copy(options.binary, options.executable)
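            # No build step runs in this mode, so record a stub BuilderIR that
            # simply names the provided binary.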
            builderIR = BuilderIR(
                name='binary',
                cflags=[options.binary],
                ldflags='')
        else:
            compiler_options = [options.cflags for _ in options.source_files]
            linker_options = options.ldflags
            _, _, builderIR = run_external_build_script(
                self.context,
                script_path=self.build_script,
                source_files=options.source_files,
                compiler_options=compiler_options,
                linker_options=linker_options,
                executable_file=options.executable)
        return builderIR

    def _init_debugger_controller(self):
        step_collection = DextIR(
            executable_path=self.context.options.executable,
            source_paths=self.context.options.source_files,
            dexter_version=self.context.version)

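        # Parse the DExTer commands (e.g. DexLimitSteps) embedded in the test
        # source files.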
        step_collection.commands = get_command_infos(
            self.context.options.source_files)

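        # DexLimitSteps tests only gather step data over user-specified
        # ranges and need the conditional controller; everything else uses
        # the default controller.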
        if 'DexLimitSteps' in step_collection.commands:
            debugger_controller = ConditionalController(self.context, step_collection)
        else:
            debugger_controller = DefaultController(self.context, step_collection)

        return debugger_controller

    def _get_steps(self, builderIR):
        """Generate a list of debugger steps from a test case.
        """
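        # The debugger is driven in a subprocess; the controller returned
        # from it carries the collected step information.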
        debugger_controller = self._init_debugger_controller()
        debugger_controller = run_debugger_subprocess(
            debugger_controller, self.context.working_directory.path)
        steps = debugger_controller.step_collection
        steps.builder = builderIR
        return steps

    def _get_results_basename(self, test_name):
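        # Flatten the test path into a single basename,
        # e.g. 'foo/bar.cpp' becomes 'foo_bar.cpp'.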
        def splitall(x):
            while len(x) > 0:
                x, y = os.path.split(x)
                yield y
        all_components = reversed(list(splitall(test_name)))
        return '_'.join(all_components)

    def _get_results_path(self, test_name):
        """Returns the base path (no extension) for the results files of the
        test denoted by test_name.
        """
        return os.path.join(self.context.options.results_directory,
                            self._get_results_basename(test_name))

    def _get_results_text_path(self, test_name):
        """Returns the path to the results .txt file for the test denoted by
        test_name.
        """
        test_results_path = self._get_results_path(test_name)
        return '{}.txt'.format(test_results_path)

    def _get_results_pickle_path(self, test_name):
        """Returns the path to the results .dextIR file for the test denoted by
        test_name.
        """
        test_results_path = self._get_results_path(test_name)
        return '{}.dextIR'.format(test_results_path)

    def _record_steps(self, test_name, steps):
        """Write the set of steps out to the test's .txt and .dextIR results
        files.
        """
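        # The .txt file is the human-readable step trace; the .dextIR file is
        # a pickle of the DextIR object.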
        output_text_path = self._get_results_text_path(test_name)
        with open(output_text_path, 'w') as fp:
            self.context.o.auto(str(steps), stream=Stream(fp))

        output_dextIR_path = self._get_results_pickle_path(test_name)
        with open(output_dextIR_path, 'wb') as fp:
            pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)

    def _record_score(self, test_name, heuristic):
        """Write out the test's heuristic score to the results .txt file.
        """
        output_text_path = self._get_results_text_path(test_name)
        with open(output_text_path, 'a') as fp:
            self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))

    def _record_test_and_display(self, test_case):
        """Output test case to o stream and record test case internally for
        handling later.
        """
        self.context.o.auto(test_case)
        self._test_cases.append(test_case)

    def _record_failed_test(self, test_name, exception):
        """Instantiate a failed test case with failure exception and
        store internally.
        """
        test_case = TestCase(self.context, test_name, None, exception)
        self._record_test_and_display(test_case)

    def _record_successful_test(self, test_name, steps, heuristic):
        """Instantiate a successful test run, store test for handling later.
        Display verbose output for test case if required.
        """
        test_case = TestCase(self.context, test_name, heuristic, None)
        self._record_test_and_display(test_case)
        if self.context.options.verbose:
            self.context.o.auto('\n{}\n'.format(steps))
            self.context.o.auto(heuristic.verbose_output)

    def _run_test(self, test_name):
        """Attempt to run test files specified in options.source_files. Store
        result internally in self._test_cases.
        """
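        # Build, debugger, and heuristic failures are caught and recorded as
        # a failed test case rather than propagated.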
        try:
            builderIR = self._build_test_case()
            steps = self._get_steps(builderIR)
            self._record_steps(test_name, steps)
            heuristic_score = Heuristic(self.context, steps)
            self._record_score(test_name, heuristic_score)
        except (BuildScriptException, DebuggerException,
                HeuristicException) as e:
            self._record_failed_test(test_name, e)
            return

        self._record_successful_test(test_name, steps, heuristic_score)
        return

    def _handle_results(self) -> ReturnCode:
        return_code = ReturnCode.OK
        options = self.context.options

        if not options.verbose:
            self.context.o.auto('\n')

        if options.calculate_average:
            # Calculate and print the average score
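            # Tests that errored or produced a NaN score are excluded.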
            score_sum = 0.0
            num_tests = 0
            for test_case in self._test_cases:
                score = test_case.score
                if not test_case.error and not math.isnan(score):
                    score_sum += test_case.score
                    num_tests += 1

            if num_tests != 0:
                print("@avg: ({:.4f})".format(score_sum/num_tests))

        summary_path = os.path.join(options.results_directory, 'summary.csv')
        with open(summary_path, mode='w', newline='') as fp:
            writer = csv.writer(fp, delimiter=',')
            writer.writerow(['Test Case', 'Score', 'Error'])

            for test_case in self._test_cases:
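                # A test fails if it raised an error or scored below the
                # --fail-lt threshold (0.0 by default, so only errors fail).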
                if (test_case.score < options.fail_lt or
                        test_case.error is not None):
                    return_code = ReturnCode.FAIL

                writer.writerow([
                    test_case.name, '{:.4f}'.format(test_case.score),
                    test_case.error
                ])

        return return_code
