#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies)

# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.

# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Library General Public License for more details.

# You should have received a copy of the GNU Library General Public License
# along with this library; see the file COPYING.LIB.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

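# This script looks for 'tst_*' autotest executables under a given directory, runs them
# (optionally in parallel), and publishes the results to stdout and as an html report.
#
# Example invocation (the script file name and the test path are only illustrative):
#   python run-qtwebkit-tests.py ~/WebKitBuild/Release/WebKit/qt/tests -j 4 --timeout 120
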
from __future__ import with_statement

import sys
import os
import os.path
import re
import logging
from subprocess import Popen, PIPE, STDOUT
from optparse import OptionParser


class Log(object):
    """ Convenience wrapper that exposes a named logger's methods as attributes. """

    def __init__(self, name):
        self._log = logging.getLogger(name)
        self.debug = self._log.debug
        self.warn = self._log.warn
        self.error = self._log.error
        self.exception = self._log.exception
        self.info = self._log.info


class Options(Log):
    """ Option manager. It parses and checks the script's parameters and stores them internally. """

    def __init__(self, args):
        Log.__init__(self, "Options")
        log = self._log
        opt = OptionParser("%prog [options] PathToSearch.\nTry -h or --help.")
        opt.add_option("-j", "--parallel-level", action="store", type="int",
              dest="parallel_level", default=None,
              help="Number of parallel processes executing Qt's tests. Default: cpu count.")
        opt.add_option("-v", "--verbose", action="store", type="int",
              dest="verbose", default=2,
              help="Verbose level (0 - quiet, 1 - errors only, 2 - infos and warnings, 3 - debug information). Default: %default.")
        opt.add_option("", "--tests-options", action="store", type="string",
              dest="tests_options", default="",
              help="Parameters passed to Qt's tests (for example '-eventdelay 123').")
        opt.add_option("-o", "--output-file", action="store", type="string",
              dest="output_file", default="/tmp/qtwebkittests.html",
              help="File where results will be stored. The file will be overwritten. Default: %default.")
        opt.add_option("-b", "--browser", action="store", dest="browser",
              default="xdg-open",
              help="Browser in which results will be opened. Default: %default.")
        opt.add_option("", "--do-not-open-results", action="store_false",
              dest="open_results", default=True,
              help="Do not open the results in a browser automatically.")
        opt.add_option("-d", "--developer-mode", action="store_true",
              dest="developer", default=False,
              help="Special mode for debugging. In general it simulates human behavior, running all autotests. In this mode everything is executed synchronously, no html output is generated, and no changes or transformations are applied to stderr or stdout. The options parallel-level, output-file, browser and do-not-open-results are ignored.")
        opt.add_option("-t", "--timeout", action="store", type="int",
              dest="timeout", default=0,
              help="Timeout in seconds for each test suite. A value of zero means that there is no timeout. Default: %default.")

        self._o, self._a = opt.parse_args(args)
        verbose = self._o.verbose
        if verbose == 0:
            logging.basicConfig(level=logging.CRITICAL)
        elif verbose == 1:
            logging.basicConfig(level=logging.ERROR)
        elif verbose == 2:
            logging.basicConfig(level=logging.INFO)
        elif verbose == 3:
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(level=logging.INFO)
            log.warn("Bad verbose level, switching to default.")
        try:
            if not os.path.exists(self._a[0]):
                raise Exception("Given path doesn't exist.")
            if len(self._a) > 1:
                raise IndexError("Only one directory could be provided.")
            self._o.path = self._a[0]
        except IndexError:
            log.error("Bad usage. Please try -h or --help.")
            sys.exit(1)
        except Exception:
            log.error("Path '%s' doesn't exist", self._a[0])
            sys.exit(2)
        if self._o.developer:
            if self._o.parallel_level is not None:
                log.warn("Developer mode sets parallel-level option to one.")
            self._o.parallel_level = 1
            self._o.open_results = False

    def __getattr__(self, attr):
        """ Maps all option properties onto this object (removes one level of indirection). """
        return getattr(self._o, attr)


def run_test(args):
    """ Runs one given test.
    args should contain a tuple with 4 elements:
      TestSuiteResult containing the full file name of an autotest executable,
      str with options that should be passed to the autotest executable,
      bool; if true then the stdout will be buffered and separated from the stderr, if it is false
        then the stdout and the stderr will be merged together and left unbuffered (the TestSuiteResult output will be None),
      int; time after which the autotest executable will be killed.
    """
    log = logging.getLogger("Exec")
    test_suite, options, buffered, timeout = args
    timer = None
    try:
        log.info("Running... %s", test_suite.test_file_name())
        if buffered:
            tst = Popen([test_suite.test_file_name()] + options.split(), stdout=PIPE, stderr=None)
        else:
            tst = Popen([test_suite.test_file_name()] + options.split(), stdout=None, stderr=STDOUT)
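        # Popen in python 2 has no timeout support of its own, so the optional timeout is
        # implemented with a threading.Timer that terminates the child process when it fires.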
        if timeout:
            from threading import Timer
            log.debug("Setting timeout timer %i sec on %s (process %s)", timeout, test_suite.test_file_name(), tst.pid)
            def process_killer():
                try:
                    try:
                        tst.terminate()
                    except AttributeError:
                        # Workaround for python versions < 2.6; it can be removed as soon as we drop support for python 2.5.
                        try:
                            import ctypes
                            PROCESS_TERMINATE = 1
                            handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, tst.pid)
                            ctypes.windll.kernel32.TerminateProcess(handle, -1)
                            ctypes.windll.kernel32.CloseHandle(handle)
                        except AttributeError:
                            # windll is not accessible, so we are on a *nix-like system.
                            import signal
                            os.kill(tst.pid, signal.SIGTERM)
                    log.error("Timeout, process '%s' (%i) was terminated", test_suite.test_file_name(), tst.pid)
                except OSError, e:
                    # The process finished before it could be killed.
                    pass
            timer = Timer(timeout, process_killer)
            timer.start()
    except OSError, e:
        log.exception("Can't open an autotest file: '%s'. Skipping the test...", e.filename)
    else:
        test_suite.set_output(tst.communicate()[0])  # Takes stdout only; in developer mode it will be None.
    log.info("Finished %s", test_suite.test_file_name())
    return test_suite


class TestSuiteResult(object):
    """ Keeps information about a test. """

    def __init__(self):
        self._output = None
        self._test_file_name = None

    def set_output(self, xml):
        if xml:
            self._output = xml.strip()

    def output(self):
        return self._output

    def set_test_file_name(self, file_name):
        self._test_file_name = file_name

    def test_file_name(self):
        return self._test_file_name


class Main(Log):
    """ The main script. All real work is done in the run() method. """

    def __init__(self, options):
        Log.__init__(self, "Main")
        self._options = options
        if options.parallel_level > 1 or options.parallel_level is None:
            try:
                from multiprocessing import Pool
            except ImportError:
                self.warn("Import Error: the multiprocessing module couldn't be loaded (maybe the python-multiprocessing package is missing?). The Qt autotests will be executed one by one.")
                options.parallel_level = 1
        if options.parallel_level == 1:

            class Pool(object):
                """ A hack created to avoid problems with the multiprocessing module; this class is a single-threaded replacement for the multiprocessing.Pool class. """
                def __init__(self, processes):
                    pass

                def imap_unordered(self, func, files):
                    return map(func, files)

                def map(self, func, files):
                    return map(func, files)

        self._Pool = Pool

    def run(self):
        """ Finds, executes and publishes the results of all tests. "All in one" function. """
        # Set this environment variable so that the proper plugin can be loaded for the qml tests.
        os.putenv("QML_IMPORT_PATH", os.path.join(self._options.path, "../../../imports"))
        self.debug("Searching executables...")
        tests_executables = self.find_tests_paths(self._options.path)
        self.debug("Found: %s", len(tests_executables))
        self.debug("Executing tests...")
        results = self.run_tests(tests_executables)
        if not self._options.developer:
            self.debug("Transforming...")
            transformed_results = self.transform(results)
            self.debug("Publishing...")
            self.announce_results(transformed_results)

    def find_tests_paths(self, path):
        """ Finds all test executables inside the given path. """
        executables = []
        for root, dirs, files in os.walk(path):
            # Check only for files whose names start with 'tst_' and that we can execute.
            filtered_path = filter(lambda w: w.startswith('tst_') and os.access(os.path.join(root, w), os.X_OK), files)
            filtered_path = map(lambda w: os.path.join(root, w), filtered_path)
            for file_name in filtered_path:
                r = TestSuiteResult()
                r.set_test_file_name(file_name)
                executables.append(r)
        return executables

    def run_tests(self, files):
        """ Executes the given files by using a pool of workers. """
        workers = self._Pool(processes=self._options.parallel_level)
        # Attach the options to each file.
        self.debug("Using the workers pool %s, number of workers: %i", repr(workers), self._options.parallel_level)
        package = map(lambda w: [w, self._options.tests_options, not self._options.developer, self._options.timeout], files)
        self.debug("Generated packages for workers: %s", repr(package))
        results = workers.map(run_test, package)  # Collects results.
        return results

    def transform(self, results):
        """ Transforms the list of results into specialized versions. """
        stdout = self.convert_to_stdout(results)
        html = self.convert_to_html(results)
        return {"stdout": stdout, "html": html}

    def announce_results(self, results):
        """ Shows the results. """
        self.announce_results_stdout(results['stdout'])
        self.announce_results_html(results['html'])

    def announce_results_stdout(self, results):
        """ Shows the results by printing them to stdout. """
        print(results)

    def announce_results_html(self, results):
        """ Shows the results by creating an html file and calling a web browser to render it. """
        with open(self._options.output_file, 'w') as f:
            f.write(results)
        if self._options.open_results:
            Popen(self._options.browser + " " + self._options.output_file, stdout=None, stderr=None, shell=True)

    def convert_to_stdout(self, results):
        """ Converts the results so that they can be nicely presented on stdout. """
        # Join all results into one piece.
        txt = "\n\n".join(map(lambda w: w.output(), results))
        # Find the total count of passed, failed and skipped tests.
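        # QTestLib prints a summary line such as "Totals: 12 passed, 0 failed, 1 skipped";
        # the triples found across all test suites are summed up below.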
        totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
        totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
        totals = map(str, totals)
        totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped"
        # Add a summary.
        txt += '\n\n\n' + '*' * 70
        txt += "\n**" + ("TOTALS: " + totals).center(66) + '**'
        txt += '\n' + '*' * 70 + '\n'
        return txt

    def convert_to_html(self, results):
        """ Converts the results so that they can be shown as an html page. """
        # Join all results into one piece.
        txt = "\n\n".join(map(lambda w: w.output(), results))
        txt = txt.replace('&', '&amp;').replace('<', "&lt;").replace('>', "&gt;")
        # Add colors and styles.
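        # Every recognized QTestLib line (PASS, FAIL!, XPASS, XFAIL, SKIP, QWARN, RESULT, QFATAL, Totals:)
        # is wrapped in a <case> element whose class ('good' or 'bad') is what the
        # "Show failures only" / "Show all" buttons of the generated page toggle.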
        txt = re.sub(r"([* ]+(Finished)[ a-z_A-Z0-9]+[*]+)",
            lambda w: r"",
            txt)
        txt = re.sub(r"([*]+[ a-z_A-Z0-9]+[*]+)",
            lambda w: "<case class='good'><br><br><b>" + w.group(0) + r"</b></case>",
            txt)
        txt = re.sub(r"(Config: Using QTest library)((.)+)",
            lambda w: "\n<case class='good'><br><i>" + w.group(0) + r"</i>  ",
            txt)
        txt = re.sub(r"\n(PASS)((.)+)",
            lambda w: "</case>\n<case class='good'><br><status class='pass'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(FAIL!)((.)+)",
            lambda w: "</case>\n<case class='bad'><br><status class='fail'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(XPASS)((.)+)",
            lambda w: "</case>\n<case class='bad'><br><status class='xpass'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(XFAIL)((.)+)",
            lambda w: "</case>\n<case class='good'><br><status class='xfail'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(SKIP)((.)+)",
            lambda w: "</case>\n<case class='good'><br><status class='xfail'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(QWARN)((.)+)",
            lambda w: "</case>\n<case class='bad'><br><status class='warn'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(RESULT)((.)+)",
            lambda w: "</case>\n<case class='good'><br><status class='benchmark'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(QFATAL)((.)+)",
            lambda w: "</case>\n<case class='bad'><br><status class='crash'>" + w.group(1) + r"</status>" + w.group(2),
            txt)
        txt = re.sub(r"\n(Totals:)([0-9', a-z]*)",
            lambda w: "</case>\n<case class='good'><br><b>" + w.group(1) + r"</b>" + w.group(2) + "</case>",
            txt)
        # Find the total count of passed, failed and skipped tests.
        totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
        totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
        totals = map(str, totals)
        totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped."
        # Wrap the results in an html page.
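        # The page template below uses old-style %-formatting: %(totals)s receives the summary
        # line and %(results)s receives the marked-up test output.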
        txt = """
        <html>
        <head>
          <script>
          function init() {
              // Try to find the right styleSheet (this document could be embedded in another html doc).
              for (i = document.styleSheets.length - 1; i >= 0; --i) {
                  if (document.styleSheets[i].cssRules[0].selectorText == "case.good") {
                      resultStyleSheet = i;
                      return;
                  }
              }
              // The styleSheet hasn't been found, but it should be the last one.
              resultStyleSheet = document.styleSheets.length - 1;
          }

          function hide() {
              document.styleSheets[resultStyleSheet].cssRules[0].style.display='none';
          }

          function show() {
              document.styleSheets[resultStyleSheet].cssRules[0].style.display='';
          }

          </script>
          <style type="text/css">
            case.good {color:black}
            case.bad {color:black}
            status.pass {color:green}
            status.fail {color:red}
            status.xpass {color:#663300}
            status.xfail {color:#004500}
            status.benchmark {color:#000088}
            status.warn {color:orange}
            status.crash {color:red; text-decoration:blink; background-color:black}
          </style>
        </head>
        <body onload="init()">
        <center>
          <h1>Qt's autotests results</h1>%(totals)s<br>
          <hr>
          <form>
            <input type="button" value="Show failures only" onclick="hide()"/>
            &nbsp;
            <input type="button" value="Show all" onclick="show()"/>
          </form>
        </center>
        <hr>
        %(results)s
        </body>
        </html>""" % {"totals": totals, "results": txt}
        return txt


if __name__ == '__main__':
    options = Options(sys.argv[1:])
    main = Main(options)
    main.run()