#!/usr/bin/python2
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""Parses and displays the contents of one or more autoserv result directories.

This script parses the contents of one or more autoserv results folders and
generates test reports.
"""

import datetime
import glob
import logging
import operator
import optparse
import os
import re
import sys

import common
from autotest_lib.utils import terminal


_STDOUT_IS_TTY = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()


def Die(message_format, *args, **kwargs):
    """Log a message and kill the current process.

    @param message_format: string for logging.error.

    """
    logging.error(message_format, *args, **kwargs)
    sys.exit(1)


class CrashWaiver(object):
    """Represents a crash that we want to ignore for now."""
    def __init__(self, signals, deadline, url, person):
        self.signals = signals
        self.deadline = datetime.datetime.strptime(deadline, '%Y-%b-%d')
        self.issue_url = url
        self.suppressor = person

# List of crashes which are okay to ignore. This list should almost always be
# empty. If you add an entry, include the bug URL and your name, something like
#     'crashy':CrashWaiver(
#       ['sig 11'], '2011-Aug-18', 'http://crosbug/123456', 'developer'),

_CRASH_WHITELIST = {
}


class ResultCollector(object):
    """Collects status and performance data from an autoserv results dir."""

    def __init__(self, collect_perf=True, collect_attr=False,
                 collect_info=False, escape_error=False,
                 whitelist_chrome_crashes=False):
        """Initialize the ResultCollector.

        @param collect_perf: Should perf keyvals be collected?
        @param collect_attr: Should attr keyvals be collected?
        @param collect_info: Should info keyvals be collected?
        @param escape_error: Escape error message text for tools.
        @param whitelist_chrome_crashes: Treat Chrome crashes as non-fatal.

        """
        self._collect_perf = collect_perf
        self._collect_attr = collect_attr
        self._collect_info = collect_info
        self._escape_error = escape_error
        self._whitelist_chrome_crashes = whitelist_chrome_crashes

    def _CollectPerf(self, testdir):
        """Parses the keyval file under testdir and returns perf keyval pairs.

        @param testdir: autoserv test result directory path.

        @return dict of perf keyval pairs.

        """
        if not self._collect_perf:
            return {}
        return self._CollectKeyval(testdir, 'perf')

    def _CollectAttr(self, testdir):
        """Parses the keyval file under testdir and returns attr keyval pairs.

        @param testdir: autoserv test result directory path.

        @return dict of attr keyval pairs.

        """
        if not self._collect_attr:
            return {}
        return self._CollectKeyval(testdir, 'attr')

    def _CollectKeyval(self, testdir, keyword):
        """Parses the keyval file under testdir.

        If testdir contains a results folder, processes the keyval file and
        returns a dictionary of keyval pairs for the given keyword.

        @param testdir: The autoserv test result directory.
        @param keyword: The keyword of keyval, either 'perf' or 'attr'.

        @return An empty dictionary if there is no keyval file under testdir;
                otherwise, a dictionary of parsed keyvals. Duplicate keys are
                uniquified by their instance number.

        """
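        # Keyval lines are assumed to look roughly like the following
        # (illustrative key name, not taken from a real log):
        #   bytes_transferred{perf}=123456
        # A second occurrence of the same key would be stored below as
        # 'bytes_transferred{1}', and so on.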
        keyval = {}
        keyval_file = os.path.join(testdir, 'results', 'keyval')
        if not os.path.isfile(keyval_file):
            return keyval

        instances = {}

        with open(keyval_file) as f:
            for line in f:
                match = re.search(r'^(.+){%s}=(.+)$' % keyword, line)
                if match:
                    key = match.group(1)
                    val = match.group(2)

                    # If the same key name was generated multiple times,
                    # uniquify all instances other than the first one by
                    # adding the instance count to the key name.
                    key_inst = key
                    instance = instances.get(key, 0)
                    if instance:
                        key_inst = '%s{%d}' % (key, instance)
                    instances[key] = instance + 1

                    keyval[key_inst] = val

        return keyval

    def _CollectCrashes(self, status_raw):
        """Parses status_raw file for crashes.

        Saves crash details if crashes are discovered.  If a whitelist is
        present, only records whitelisted crashes.

        @param status_raw: The contents of the status.log or status file from
                the test.

        @return a list of crash entries to be reported.

        """
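        # The regex below assumes crash-reporter status lines of roughly this
        # shape (illustrative; the exact wording comes from the crash
        # reporter, not from this script):
        #   ... Received crash notification for <executable> ... sig <N> ...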
        crashes = []
        regex = re.compile(
                r'Received crash notification for ([-\w]+).+ (sig \d+)')
        chrome_regex = re.compile(r'^supplied_[cC]hrome|^chrome$')
        for match in regex.finditer(status_raw):
            w = _CRASH_WHITELIST.get(match.group(1))
            if (self._whitelist_chrome_crashes and
                    chrome_regex.match(match.group(1))):
                print '@@@STEP_WARNINGS@@@'
                print '%s crashed with %s' % (match.group(1), match.group(2))
            elif (w is not None and match.group(2) in w.signals and
                        w.deadline > datetime.datetime.now()):
                print 'Ignoring crash in %s for waiver that expires %s' % (
                        match.group(1), w.deadline.strftime('%Y-%b-%d'))
            else:
                crashes.append('%s %s' % match.groups())
        return crashes

    def _CollectInfo(self, testdir, custom_info):
        """Parses *_info files under testdir/sysinfo/var/log.

        If the sysinfo/var/log/*info files exist, save information that shows
        hw, ec and bios version info.

        This collection of extra info is disabled by default (this function is
        a no-op).  It is enabled only if the --info command-line option is
        explicitly supplied.  Normal job parsing does not supply this option.

        @param testdir: The autoserv test result directory.
        @param custom_info: Dictionary to collect detailed ec/bios info.

        @return a dictionary of info that was discovered.

        """
        if not self._collect_info:
            return {}
        info = custom_info

        sysinfo_dir = os.path.join(testdir, 'sysinfo', 'var', 'log')
        for info_file, info_keys in {'ec_info.txt': ['fw_version'],
                                     'bios_info.txt': ['fwid',
                                                       'hwid']}.iteritems():
            info_file_path = os.path.join(sysinfo_dir, info_file)
            if not os.path.isfile(info_file_path):
                continue
            # Some examples of raw text that might be matched include:
            #
            # fw_version           | snow_v1.1.332-cf20b3e
            # fwid = Google_Snow.2711.0.2012_08_06_1139 # Active firmware ID
            # hwid = DAISY TEST A-A 9382                # Hardware ID
            info_regex = re.compile(r'^(%s)\s*[|=]\s*(.*)' %
                                    '|'.join(info_keys))
            with open(info_file_path, 'r') as f:
                for line in f:
                    line = line.strip()
                    line = line.split('#')[0]
                    match = info_regex.match(line)
                    if match:
                        info[match.group(1)] = str(match.group(2)).strip()
        return info

    def _CollectEndTimes(self, status_raw, status_re='', is_end=True):
        """Helper to match and collect timestamp and localtime.

        Preferably, the timestamp and localtime are located via an
        'END GOOD test_name...' line.  However, aborted tests occasionally fail
        to produce this line, in which case timestamps are scraped from the
        'START test_name...' line.

        @param status_raw: multi-line text to search.
        @param status_re: status regex to seek (e.g. GOOD|FAIL)
        @param is_end: if True, search for 'END'; otherwise 'START'.

        @return Tuple of timestamp, localtime retrieved from the test status
                log.

        """
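        # A matching 'END' line looks like the example quoted in
        # _CollectResult below:
        #   END GOOD testname ... timestamp=1347324321 localtime=Sep 10 17:45:21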
        timestamp = ''
        localtime = ''

        localtime_re = r'\w+\s+\w+\s+[:\w]+'
        match_filter = (
                r'^\s*%s\s+(?:%s).*timestamp=(\d*).*localtime=(%s).*$' % (
                'END' if is_end else 'START', status_re, localtime_re))
        matches = re.findall(match_filter, status_raw, re.MULTILINE)
        if matches:
            # There may be multiple lines with timestamp/localtime info.
            # Walk the matches from last to first and keep the largest
            # timestamp, which reflects the end time.
            for i in xrange(len(matches)):
                timestamp_, localtime_ = matches[-(i+1)]
                if not timestamp or timestamp_ > timestamp:
                    timestamp = timestamp_
                    localtime = localtime_
        return timestamp, localtime

    def _CheckExperimental(self, testdir):
        """Parses the keyval file and returns the value of `experimental`.

        @param testdir: The result directory that has the keyval file.

        @return A boolean indicating whether this is an experimental test.

        """
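        # The keyval file is expected to carry a plain 'experimental=True' or
        # 'experimental=False' line, which is what the regex below matches.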
        keyval_file = os.path.join(testdir, 'keyval')
        if not os.path.isfile(keyval_file):
            return False

        with open(keyval_file) as f:
            for line in f:
                match = re.match(r'experimental=(.+)', line)
                if match:
                    return match.group(1) == 'True'
        return False

    def _CollectResult(self, testdir, results, is_experimental=False):
        """Collects results stored under testdir into a dictionary.

        The presence/location of status files (status.log, status and
        job_report.html) varies depending on whether the job is a simple
        client test, simple server test, old-style suite or new-style
        suite.  For example:
        - In some cases a single job_report.html may exist but many times
          multiple instances are produced in a result tree.
        - Most tests will produce a status.log but client tests invoked
          by a server test will only emit a status file.

        The two common criteria that seem to define the presence of a
        valid test result are:
        1. Existence of a 'status.log' or 'status' file. Note that if both a
           'status.log' and 'status' file exist for a test, the 'status' file
           is always a subset of the 'status.log' file contents.
        2. Presence of a 'debug' directory.

        In some cases multiple 'status.log' files will exist where the parent
        'status.log' contains the contents of multiple subdirectory 'status.log'
        files.  Parent and subdirectory 'status.log' files are always expected
        to agree on the outcome of a given test.

        The test results discovered from the 'status*' files are included
        in the result dictionary.  The test directory name and a test directory
        timestamp/localtime are saved to be used as sort keys for the results.

        The value of 'is_experimental' is included in the result dictionary.

        @param testdir: The autoserv test result directory.
        @param results: A list to which a populated test-result-dictionary will
                be appended if a status file is found.
        @param is_experimental: A boolean value indicating whether the result
                directory is for an experimental test.

        """
        status_file = os.path.join(testdir, 'status.log')
        if not os.path.isfile(status_file):
            status_file = os.path.join(testdir, 'status')
            if not os.path.isfile(status_file):
                return

        # Status is True if GOOD, else False for all others.
        status = False
        error_msg = ''
        with open(status_file, 'r') as f:
            status_raw = f.read()
        failure_tags = 'ABORT|ERROR|FAIL'
        warning_tag = 'WARN|TEST_NA'
        failure = re.search(r'%s' % failure_tags, status_raw)
        warning = re.search(r'%s' % warning_tag, status_raw) and not failure
        good = (re.search(r'GOOD.+completed successfully', status_raw) and
                not (failure or warning))

        # We'd like warnings to allow the tests to pass, but still gather info.
        if good or warning:
            status = True

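        # The reason extraction below assumes the text following the
        # ABORT/ERROR/FAIL/WARN tag is tab-separated, with the fifth field
        # holding the human-readable reason (hence the split('\t')[4]); the
        # exact field layout is dictated by the autotest status log format.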
        if not good:
            match = re.search(r'^\t+(%s|%s)\t(.+)' % (failure_tags,
                                                      warning_tag),
                              status_raw, re.MULTILINE)
            if match:
                failure_type = match.group(1)
                reason = match.group(2).split('\t')[4]
                if self._escape_error:
                    reason = re.escape(reason)
                error_msg = ': '.join([failure_type, reason])

        # Grab the timestamp - can be used for sorting the test runs.
        # Grab the localtime - may be printed to enable line filtering by date.
        # Designed to match a line like this:
        #   END GOOD testname ... timestamp=1347324321 localtime=Sep 10 17:45:21
        status_re = r'GOOD|%s|%s' % (failure_tags, warning_tag)
        timestamp, localtime = self._CollectEndTimes(status_raw, status_re)
        # Hung tests will occasionally skip printing the END line so grab
        # a default timestamp from the START line in those cases.
        if not timestamp:
            timestamp, localtime = self._CollectEndTimes(status_raw,
                                                         is_end=False)

        results.append({
                'testdir': testdir,
                'crashes': self._CollectCrashes(status_raw),
                'status': status,
                'error_msg': error_msg,
                'localtime': localtime,
                'timestamp': timestamp,
                'perf': self._CollectPerf(testdir),
                'attr': self._CollectAttr(testdir),
                'info': self._CollectInfo(testdir, {'localtime': localtime,
                                                    'timestamp': timestamp}),
                'experimental': is_experimental})

    def RecursivelyCollectResults(self, resdir, parent_experimental_tag=False):
        """Recursively collect results into a list of dictionaries.

        Only recurses into directories that possess a 'debug' subdirectory
        because anything else is not considered a 'test' directory.

        The value of 'experimental' in the keyval file is used to determine
        whether the result is for an experimental test. If it is, all of its
        subdirectories are considered to be experimental tests too.

        @param resdir: results/test directory to parse results from and recurse
                into.
        @param parent_experimental_tag: A boolean value, used to keep track of
                whether its parent directory is for an experimental test.

        @return List of dictionaries of results.

        """
        results = []
        is_experimental = (parent_experimental_tag or
                           self._CheckExperimental(resdir))
        self._CollectResult(resdir, results, is_experimental)
        for testdir in glob.glob(os.path.join(resdir, '*')):
            # Remove false positives that are missing a debug dir.
            if not os.path.exists(os.path.join(testdir, 'debug')):
                continue

            results.extend(self.RecursivelyCollectResults(
                    testdir, is_experimental))
        return results


class ReportGenerator(object):
    """Collects and displays data from autoserv results directories.

    This class collects status and performance data from one or more autoserv
    result directories and generates test reports.
    """

    _KEYVAL_INDENT = 2
    _STATUS_STRINGS = {'hr': {'pass': '[  PASSED  ]', 'fail': '[  FAILED  ]'},
                       'csv': {'pass': 'PASS', 'fail': 'FAIL'}}

    def __init__(self, options, args):
        self._options = options
        self._args = args
        self._color = terminal.Color(options.color)
        self._results = []

    def _CollectAllResults(self):
        """Parses results into the self._results list.

        Builds a list (self._results) where each entry is a dictionary of
        result data from one test (which may contain other tests). Each
        dictionary will contain values such as: test folder, status, localtime,
        crashes, error_msg, perf keyvals [optional], info [optional].

        """
        collector = ResultCollector(
                collect_perf=self._options.perf,
                collect_attr=self._options.attr,
                collect_info=self._options.info,
                escape_error=self._options.escape_error,
                whitelist_chrome_crashes=self._options.whitelist_chrome_crashes)

        for resdir in self._args:
            if not os.path.isdir(resdir):
                Die('%r does not exist', resdir)
            self._results.extend(collector.RecursivelyCollectResults(resdir))

        if not self._results:
            Die('no test directories found')

    def _GenStatusString(self, status):
        """Given a bool indicating success or failure, return the right string.

        Also takes --csv into account, returning old-style strings if it is set.

        @param status: True or False, indicating success or failure.

        @return The appropriate string for printing.

        """
        success = 'pass' if status else 'fail'
        if self._options.csv:
            return self._STATUS_STRINGS['csv'][success]
        return self._STATUS_STRINGS['hr'][success]

    def _Indent(self, msg):
        """Given a message, indents it appropriately.

        @param msg: string to indent.
        @return indented version of msg.

        """
        return ' ' * self._KEYVAL_INDENT + msg

    def _GetTestColumnWidth(self):
        """Returns the test column width based on the test data.

        The test results are aligned by discovering the widest test
        directory name or perf key stored in the list of result dictionaries.

        @return The width for the test column.

        """
        width = 0
        for result in self._results:
            width = max(width, len(result['testdir']))
            perf = result.get('perf')
            if perf:
                perf_key_width = len(max(perf, key=len))
                width = max(width, perf_key_width + self._KEYVAL_INDENT)
        return width

    def _PrintDashLine(self, width):
        """Prints a line of dashes as a separator in output.

        @param width: an integer.
        """
        if not self._options.csv:
            print ''.ljust(width + len(self._STATUS_STRINGS['hr']['pass']), '-')

    def _PrintEntries(self, entries):
        """Prints a list of strings, delimited based on --csv flag.

        @param entries: a list of strings, entities to output.

        """
        delimiter = ',' if self._options.csv else ' '
        print delimiter.join(entries)

    def _PrintErrors(self, test, error_msg):
        """Prints an indented error message, unless the --csv flag is set.

        @param test: the name of a test with which to prefix the line.
        @param error_msg: a message to print.  None is allowed, but ignored.

        """
        if not self._options.csv and error_msg:
            self._PrintEntries([test, self._Indent(error_msg)])

    def _PrintErrorLogs(self, test, test_string):
        """Prints the error log for |test| if --debug is set.

        @param test: the name of a test suitable for embedding in a path
        @param test_string: the name of a test with which to prefix the line.

        """
        if self._options.print_debug:
            debug_file_regex = os.path.join(
                    'results.', test, 'debug',
                    '%s*.ERROR' % os.path.basename(test))
            for path in glob.glob(debug_file_regex):
                try:
                    with open(path) as fh:
                        for line in fh:
                            # Skip lines that are only whitespace.
                            if not line.strip():
                                continue
                            self._PrintEntries(
                                    [test_string, self._Indent(line.rstrip())])
                except IOError:
                    print 'Could not open %s' % path

    def _PrintResultDictKeyVals(self, test_entry, result_dict):
        """Pretty-prints a dict of keyvals such as 'perf' or 'info'.

        This function emits each keyval on a single line for uncompressed
        review.  The 'perf' dictionary contains performance keyvals while the
        'info' dictionary contains ec info, bios info and some test timestamps.

        @param test_entry: The unique name of the test (dir) - matches other
                test output.
        @param result_dict: A dict of keyvals to be presented.

        """
        if not result_dict:
            return
        width = self._GetTestColumnWidth()
        for dict_key in sorted(result_dict.keys()):
            if self._options.csv:
                key_entry = dict_key
            else:
                key_entry = dict_key.ljust(width - self._KEYVAL_INDENT)
                key_entry = key_entry.rjust(width)
            value_entry = self._color.Color(
                    self._color.BOLD, result_dict[dict_key])
            self._PrintEntries([test_entry, key_entry, value_entry])

    def _GetSortedTests(self):
        """Sort the test result dicts in preparation for results printing.

        By default, sorts the result dictionaries by their test names.
        However, when running long suites, it is useful to see if an early test
        has wedged the system and caused the remaining tests to abort/fail. The
        datetime-based chronological sorting allows this view.

        Uses the --sort-chron command line option to control.

        """
        if self._options.sort_chron:
            # Need to reverse sort the test dirs to ensure the suite folder
            # shows at the bottom. Because the suite folder shares its datetime
            # with the last test, it shows second-to-last without the reverse
            # sort first.
            tests = sorted(self._results, key=operator.itemgetter('testdir'),
                           reverse=True)
            tests = sorted(tests, key=operator.itemgetter('timestamp'))
        else:
            tests = sorted(self._results, key=operator.itemgetter('testdir'))
        return tests

    # TODO(zamorzaev): reuse this method in _GetResultsForHTMLReport to avoid
    # code duplication.
    def _GetDedupedResults(self):
        """Aggregate results from multiple retries of the same test."""
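        # Retry directories are assumed to be named roughly like
        # '<log_dir>results-<N>-<test_name>' (e.g. 'results-1-dummy_Pass',
        # an illustrative name), which is what the regex below matches.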
        deduped_results = {}
        for test in self._GetSortedTests():
            test_details_matched = re.search(r'(.*)results-(\d[0-9]*)-(.*)',
                                             test['testdir'])
            if not test_details_matched:
                continue

            log_dir, test_number, test_name = test_details_matched.groups()
            if (test_name in deduped_results and
                deduped_results[test_name].get('status')):
                # Already have a successful (re)try.
                continue

            deduped_results[test_name] = test
        return deduped_results.values()

    def _GetResultsForHTMLReport(self):
        """Return cleaned results for the HTML report."""
        import copy
        tests = copy.deepcopy(self._GetSortedTests())
        pass_tag = "Pass"
        fail_tag = "Fail"
        na_tag = "NA"
        count = 0
        html_results = {}
        for test_status in tests:
            individual_tc_results = {}
            test_details_matched = re.search(r'(.*)results-(\d[0-9]*)-(.*)',
                                             test_status['testdir'])
            if not test_details_matched:
                continue
            log_dir = test_details_matched.group(1)
            test_number = test_details_matched.group(2)
            test_name = test_details_matched.group(3)
            if '/' in test_name:
                test_name = test_name.split('/')[0]
            if test_status['error_msg'] is None:
                test_status['error_msg'] = ''
            if test_name not in html_results:
                count += 1
                # Arrange the results in order.
                individual_tc_results['status'] = test_status['status']
                individual_tc_results['error_msg'] = test_status['error_msg']
                individual_tc_results['s_no'] = count
                individual_tc_results['crashes'] = test_status['crashes']

                # Add <b> and </b> tags so the report formats nicely.
                individual_tc_results['attempts'] = \
                    '<b>test_result_number: %s - %s</b> : %s' % (
                        test_number, log_dir, test_status['error_msg'])
                html_results[test_name] = individual_tc_results
            else:
                # If the test was seen already, update the existing row
                # instead of creating a second HTML row. The existing entry
                # only needs updating if its status is False.
                if html_results[test_name]['status'] is False:
                    html_results[test_name]['status'] = test_status['status']
                    html_results[test_name]['error_msg'] = test_status[
                        'error_msg']
                    html_results[test_name]['crashes'] = \
                        html_results[test_name]['crashes'] + test_status[
                            'crashes']
                    html_results[test_name]['attempts'] = \
                        html_results[test_name]['attempts'] + \
                        '<br/><b>test_result_number : %s - %s</b> : %s' % (
                            test_number, log_dir, test_status['error_msg'])

        # Re-key the dictionary by s_no so the data can be ordered at the end.
        sorted_html_results = {}
        for key in html_results.keys():
            sorted_html_results[str(html_results[key]['s_no'])] = \
                    html_results[key]
            sorted_html_results[str(html_results[key]['s_no'])]['test'] = key

        # Map the test case status: True -> Pass, False -> Fail, and True with
        # a non-empty error message -> NA.
        for key in sorted_html_results.keys():
            if sorted_html_results[key]['status']:
                if sorted_html_results[key]['error_msg'] != '':
                    sorted_html_results[key]['status'] = na_tag
                else:
                    sorted_html_results[key]['status'] = pass_tag
            else:
                sorted_html_results[key]['status'] = fail_tag

        return sorted_html_results

    def GenerateReportHTML(self):
        """Generate a clean HTML report for the results."""

        results = self._GetResultsForHTMLReport()
        html_table_header = """ <th>S.No</th>
                                <th>Test</th>
                                <th>Status</th>
                                <th>Error Message</th>
                                <th>Crashes</th>
                                <th>Attempts</th>
                            """
        passed_tests = len([key for key in results.keys() if results[key][
                'status'].lower() == 'pass'])
        failed_tests = len([key for key in results.keys() if results[key][
            'status'].lower() == 'fail'])
        na_tests = len([key for key in results.keys() if results[key][
            'status'].lower() == 'na'])
        total_tests = passed_tests + failed_tests + na_tests

        # Sort the keys.
        ordered_keys = sorted([int(key) for key in results.keys()])
        html_table_body = ''
        for key in ordered_keys:
            key = str(key)
            if results[key]['status'].lower() == 'pass':
                color = 'LimeGreen'
            elif results[key]['status'].lower() == 'na':
                color = 'yellow'
            else:
                color = 'red'
            html_table_body = html_table_body + """<tr>
                                                    <td>%s</td>
                                                    <td>%s</td>
                                                    <td
                                                    style="background-color:%s;">
                                                    %s</td>
                                                    <td>%s</td>
                                                    <td>%s</td>
                                                    <td>%s</td></tr>""" % \
                                                (key, results[key]['test'],
                                                 color,
                                                 results[key]['status'],
                                                 results[key]['error_msg'],
                                                 results[key]['crashes'],
                                                 results[key]['attempts'])
        html_page = """
                        <!DOCTYPE html>
                        <html lang="en">
                        <head>
                            <title>Automation Results</title>
                            <meta charset="utf-8">
                            <meta name="viewport" content="width=device-width,initial-scale=1">
                            <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
                            <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
                            <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
                        </head>
                        <body>
                            <div class="container">
                                <h2>Automation Report</h2>
                                <table class="table table-bordered" border="1">
                                    <thead>
                                        <tr style="background-color:LightSkyBlue;">
                                        \n%s
                                        </tr>
                                    </thead>
                                    <tbody>
                                    \n%s
                                    </tbody>
                                </table>
                                <div class="row">
                                    <div class="col-sm-4">Passed: <b>%d</b></div>
                                    <div class="col-sm-4">Failed: <b>%d</b></div>
                                    <div class="col-sm-4">NA: <b>%d</b></div>
                                </div>
                                <div class="row">
                                    <div class="col-sm-4">Total: <b>%d</b></div>
                                </div>
                            </div>
                        </body>
                        </html>

                """ % (html_table_header, html_table_body, passed_tests,
                       failed_tests, na_tests, total_tests)
        with open(os.path.join(self._options.html_report_dir,
                               "test_report.html"), 'w') as html_file:
            html_file.write(html_page)

    def _GenerateReportText(self):
        """Prints a result report to stdout.

        Prints a result table to stdout. Each row of the table contains the
        test result directory and the test result (PASS, FAIL). If the perf
        option is enabled, each test entry is followed by perf keyval entries
        from the test results.

        """
        tests = self._GetSortedTests()
        width = self._GetTestColumnWidth()

        crashes = {}
        tests_pass = 0
        self._PrintDashLine(width)

        for result in tests:
            testdir = result['testdir']
            test_entry = testdir if self._options.csv else testdir.ljust(width)

            status_entry = self._GenStatusString(result['status'])
            if result['status']:
                color = self._color.GREEN
                # Change the color of 'PASSED' if the test run wasn't completely
                # ok, so it's more obvious it isn't a pure pass.
                if 'WARN' in result['error_msg']:
                    color = self._color.YELLOW
                elif 'TEST_NA' in result['error_msg']:
                    color = self._color.MAGENTA
                tests_pass += 1
            else:
                color = self._color.RED

            test_entries = [test_entry, self._color.Color(color, status_entry)]

            info = result.get('info', {})
            info.update(result.get('attr', {}))
            if self._options.csv and (self._options.info or self._options.attr):
                if info:
                    test_entries.extend(['%s=%s' % (k, info[k])
                                        for k in sorted(info.keys())])
                if not result['status'] and result['error_msg']:
                    test_entries.append('reason="%s"' % result['error_msg'])

            self._PrintEntries(test_entries)
            self._PrintErrors(test_entry, result['error_msg'])

            # Print out the error log for failed tests.
            if not result['status']:
                self._PrintErrorLogs(testdir, test_entry)

            # Emit the perf keyvals entries. There will be no entries if the
            # --no-perf option is specified.
            self._PrintResultDictKeyVals(test_entry, result['perf'])

            # Determine whether there was a crash during this test.
            if result['crashes']:
                for crash in result['crashes']:
                    if crash not in crashes:
                        crashes[crash] = set([])
                    crashes[crash].add(testdir)

            # Emit extra test metadata info on separate lines if not --csv.
            if not self._options.csv:
                self._PrintResultDictKeyVals(test_entry, info)

        self._PrintDashLine(width)

        if not self._options.csv:
            total_tests = len(tests)
            percent_pass = 100 * tests_pass / total_tests
            pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass)
            print 'Total PASS: ' + self._color.Color(self._color.BOLD, pass_str)

        if self._options.crash_detection:
            print ''
            if crashes:
                print self._color.Color(self._color.RED,
                                        'Crashes detected during testing:')
                self._PrintDashLine(width)

                for crash_name, crashed_tests in sorted(crashes.iteritems()):
                    print self._color.Color(self._color.RED, crash_name)
                    for crashed_test in crashed_tests:
                        print self._Indent(crashed_test)

                self._PrintDashLine(width)
                print ('Total unique crashes: ' +
                       self._color.Color(self._color.BOLD, str(len(crashes))))

            # Sometimes the builders exit before these buffers are flushed.
            sys.stderr.flush()
            sys.stdout.flush()

    def Run(self):
        """Runs report generation."""
        self._CollectAllResults()
        if not self._options.just_status_code:
            self._GenerateReportText()
            if self._options.html:
                print "\nLogging the data into the test_report.html file."
                try:
                    self.GenerateReportHTML()
                except Exception as e:
                    print "Failed to generate the HTML report: %s" % str(e)
        for d in self._GetDedupedResults():
            if d['experimental'] and self._options.ignore_experimental_tests:
                continue
            if not d['status'] or (
                    self._options.crash_detection and d['crashes']):
                sys.exit(1)


def main():
    usage = 'Usage: %prog [options] result-directories...'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--color', dest='color', action='store_true',
                      default=_STDOUT_IS_TTY,
                      help='Use color for text reports [default if TTY stdout]')
    parser.add_option('--no-color', dest='color', action='store_false',
                      help='Don\'t use color for text reports')
    parser.add_option('--no-crash-detection', dest='crash_detection',
                      action='store_false', default=True,
                      help='Don\'t report crashes or error out when detected')
    parser.add_option('--csv', dest='csv', action='store_true',
                      help='Output test result in CSV format.  '
                      'Implies --no-debug --no-crash-detection.')
    parser.add_option('--html', dest='html', action='store_true',
                      help='Generate an HTML report.  '
                           'Implies --no-debug --no-crash-detection.')
    parser.add_option('--html-report-dir', dest='html_report_dir',
                      action='store', default=None,
                      help='Path in which to generate the HTML report')
    parser.add_option('--info', dest='info', action='store_true',
                      default=False,
                      help='Include info keyvals in the report')
    parser.add_option('--escape-error', dest='escape_error',
                      action='store_true', default=False,
                      help='Escape error message text for tools.')
    parser.add_option('--perf', dest='perf', action='store_true',
                      default=True,
                      help='Include perf keyvals in the report [default]')
    parser.add_option('--attr', dest='attr', action='store_true',
                      default=False,
                      help='Include attr keyvals in the report')
    parser.add_option('--no-perf', dest='perf', action='store_false',
                      help='Don\'t include perf keyvals in the report')
    parser.add_option('--sort-chron', dest='sort_chron', action='store_true',
                      default=False,
                      help='Sort results by datetime instead of by test name.')
    parser.add_option('--no-debug', dest='print_debug', action='store_false',
                      default=True,
                      help='Don\'t print out logs when tests fail.')
    parser.add_option('--whitelist_chrome_crashes',
                      dest='whitelist_chrome_crashes',
                      action='store_true', default=False,
                      help='Treat Chrome crashes as non-fatal.')
    parser.add_option('--ignore_experimental_tests',
                      dest='ignore_experimental_tests',
                      action='store_true', default=False,
                      help='If set, experimental test results will not '
                           'influence the exit code.')
    parser.add_option('--just_status_code',
                      dest='just_status_code',
                      action='store_true', default=False,
                      help='Skip generating a report, just return status code.')

    (options, args) = parser.parse_args()

    if not args:
        parser.print_help()
        Die('no result directories provided')

    if options.csv and (options.print_debug or options.crash_detection):
        logging.warning('Forcing --no-debug --no-crash-detection')
        options.print_debug = False
        options.crash_detection = False

    report_options = ['color', 'csv', 'info', 'escape_error', 'perf', 'attr',
                      'sort_chron', 'print_debug', 'html', 'html_report_dir']
    if options.just_status_code and any(
            getattr(options, opt) for opt in report_options):
        logging.warning(
                'Passed --just_status_code and incompatible options %s' %
                ' '.join(opt for opt in report_options
                         if getattr(options, opt)))

    generator = ReportGenerator(options, args)
    generator.Run()


if __name__ == '__main__':
    main()