# -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
""" This module implements the 'scan-build' command API.

Running the static analyzer against a build is done in multiple steps:

 -- Intercept: capture the compilation commands during the build,
 -- Analyze:   run the analyzer against the captured commands,
 -- Report:    create a cover report from the analyzer outputs.  """
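
# Illustrative overview (comment only, not executed): the three steps above map
# onto the functions used in this module roughly as follows; the command line
# flags are whatever parse_args_for_scan_build()/parse_args_for_analyze_build()
# accept.
#
#   capture(args)               # Intercept: write a compilation database
#   govern_analyzer_runs(args)  # Analyze:   run clang --analyze per entry
#   document(args)              # Report:    render the cover report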

import re
import os
import os.path
import json
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil
import glob
from collections import defaultdict

from libscanbuild import command_entry_point, compiler_wrapper, \
    wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
    parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
    compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
    ClangErrorException
from libscanbuild.shell import decode

__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']

COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'

CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'


@command_entry_point
def scan_build():
    """ Entry point for scan-build command. """

    args = parse_args_for_scan_build()
    # will re-assign the report directory as new output
    with report_directory(args.output, args.keep_empty) as args.output:
        # Run against a build command. There are cases when an analyzer run
        # is not required, but we still need to set up everything for the
        # wrappers, because 'configure' needs to capture the CC/CXX values
        # for the Makefile.
        if args.intercept_first:
            # Run the build command with the intercept module.
            exit_code = capture(args)
            # Run the analyzer against the captured commands.
            if need_analyzer(args.build):
                govern_analyzer_runs(args)
        else:
            # Run the build command and the analyzer with compiler wrappers.
            environment = setup_environment(args)
            exit_code = run_build(args.build, env=environment)
        # Cover report generation and bug counting.
        number_of_bugs = document(args)
        # Set exit status as it was requested.
        return number_of_bugs if args.status_bugs else exit_code


@command_entry_point
def analyze_build():
    """ Entry point for analyze-build command. """

    args = parse_args_for_analyze_build()
    # will re-assign the report directory as new output
    with report_directory(args.output, args.keep_empty) as args.output:
        # Run the analyzer against a compilation db.
        govern_analyzer_runs(args)
        # Cover report generation and bug counting.
        number_of_bugs = document(args)
        # Set exit status as it was requested.
        return number_of_bugs if args.status_bugs else 0


def need_analyzer(args):
    """ Check the intent of the build command.

    When the static analyzer is run against a project's configure step, it
    should be silent: there is no need to run the analyzer or generate a
    report.

    Running `scan-build` against the configure step might still be necessary
    when compiler wrappers are used. That's the moment when the build setup
    checks the compiler and captures its location for the build process. """

    return len(args) and not re.search(r'configure|autogen', args[0])
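
# Illustrative examples (comments only; the return values are a sketch of the
# expression above):
#
#   need_analyzer(['./configure', '--prefix=/usr'])  # -> False, configure step
#   need_analyzer(['make', 'all'])                   # -> True, real build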


def prefix_with(constant, pieces):
    """ From a sequence create another sequence where every second element
    is from the original sequence and the other elements are the prefix.

    e.g.: prefix_with(0, [1, 2, 3]) creates [0, 1, 0, 2, 0, 3] """

    return [elem for piece in pieces for elem in [constant, piece]]


def get_ctu_config_from_args(args):
    """ CTU configuration is created from the chosen phases and dir. """

    return (
        CtuConfig(collect=args.ctu_phases.collect,
                  analyze=args.ctu_phases.analyze,
                  dir=args.ctu_dir,
                  extdef_map_cmd=args.extdef_map_cmd)
        if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
        else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))


def get_ctu_config_from_json(ctu_conf_json):
    """ CTU configuration is created from the chosen phases and dir. """

    ctu_config = json.loads(ctu_conf_json)
    # Recover namedtuple from json when coming from analyze-cc or analyze-c++
    return CtuConfig(collect=ctu_config[0],
                     analyze=ctu_config[1],
                     dir=ctu_config[2],
                     extdef_map_cmd=ctu_config[3])
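
# Illustrative sketch (comment only; paths and command names are made up):
# setup_environment() serializes the CtuConfig namedtuple with json.dumps(),
# so a value like
#     CtuConfig(collect=True, analyze=False, dir='/tmp/ctu',
#               extdef_map_cmd='clang-extdef-mapping')
# arrives here as the JSON array
#     '[true, false, "/tmp/ctu", "clang-extdef-mapping"]'
# which is why the fields are recovered by index above.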


def create_global_ctu_extdef_map(extdef_map_lines):
    """ Takes an iterator of individual external definition maps and creates
    a global map, keeping only the unique names. Conflicting names are left
    out of CTU.

    :param extdef_map_lines: Contains the id of a definition (mangled name)
    and the originating source (the corresponding AST file) name.
    :type extdef_map_lines: Iterator of str.
    :returns: Mangled name - AST file pairs.
    :rtype: List of (str, str) tuples.
    """

    mangled_to_asts = defaultdict(set)

    for line in extdef_map_lines:
        mangled_name, ast_file = line.strip().split(' ', 1)
        mangled_to_asts[mangled_name].add(ast_file)

    mangled_ast_pairs = []

    for mangled_name, ast_files in mangled_to_asts.items():
        if len(ast_files) == 1:
            mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))

    return mangled_ast_pairs
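
# Illustrative example (comment only, with made-up mangled names and paths):
# given the input lines
#     '_Z3foov /proj/a.cpp.ast'
#     '_Z3foov /proj/b.cpp.ast'
#     '_Z3barv /proj/a.cpp.ast'
# the function above returns [('_Z3barv', '/proj/a.cpp.ast')] -- '_Z3foov' is
# dropped because it maps to two different AST files.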


def merge_ctu_extdef_maps(ctudir):
    """ Merge individual external definition maps into a global one.

    As the collect phase runs in parallel on multiple threads, each
    compilation unit is mapped separately into a temporary file in
    CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the mangled names
    and the source (the AST generated from the source) that holds their
    definition. At the end these files are merged into a global map file:
    CTU_EXTDEF_MAP_FILENAME. """

    def generate_extdef_map_lines(extdefmap_dir):
        """ Iterate over all lines of the input files in a deterministic
        order. """

        files = glob.glob(os.path.join(extdefmap_dir, '*'))
        files.sort()
        for filename in files:
            with open(filename, 'r') as in_file:
                for line in in_file:
                    yield line

    def write_global_map(arch, mangled_ast_pairs):
        """ Write (mangled name, ast file) pairs into the final file. """

        extern_defs_map_file = os.path.join(ctudir, arch,
                                            CTU_EXTDEF_MAP_FILENAME)
        with open(extern_defs_map_file, 'w') as out_file:
            for mangled_name, ast_file in mangled_ast_pairs:
                out_file.write('%s %s\n' % (mangled_name, ast_file))

    triple_arches = glob.glob(os.path.join(ctudir, '*'))
    for triple_path in triple_arches:
        if os.path.isdir(triple_path):
            triple_arch = os.path.basename(triple_path)
            extdefmap_dir = os.path.join(ctudir, triple_arch,
                                         CTU_TEMP_DEFMAP_FOLDER)

            extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
            mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
            write_global_map(triple_arch, mangled_ast_pairs)

            # Remove all temporary files.
            shutil.rmtree(extdefmap_dir, ignore_errors=True)
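
# Illustrative directory layout (comment only; '<ctu-dir>' and the triple are
# placeholders): before merge_ctu_extdef_maps() runs, the collect phase leaves
#     <ctu-dir>/x86_64/tmpExternalDefMaps/tmp1234
#     <ctu-dir>/x86_64/tmpExternalDefMaps/tmp5678
# and afterwards only the merged map remains:
#     <ctu-dir>/x86_64/externalDefMap.txt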


def run_analyzer_parallel(args):
    """ Runs the analyzer against the given compilation database. """

    def exclude(filename, directory):
        """ Return True when any excluded directory is a prefix of the
        filename. """
        if not os.path.isabs(filename):
            # filename is either absolute or relative to directory. Need to
            # make it absolute since 'args.excludes' are absolute paths.
            filename = os.path.normpath(os.path.join(directory, filename))
        return any(re.match(r'^' + exclude_directory, filename)
                   for exclude_directory in args.excludes)

    consts = {
        'clang': args.clang,
        'output_dir': args.output,
        'output_format': args.output_format,
        'output_failures': args.output_failures,
        'direct_args': analyzer_params(args),
        'force_debug': args.force_debug,
        'ctu': get_ctu_config_from_args(args)
    }

    logging.debug('run analyzer against compilation database')
    with open(args.cdb, 'r') as handle:
        generator = (dict(cmd, **consts)
                     for cmd in json.load(handle) if not exclude(
                         cmd['file'], cmd['directory']))
        # when verbose output is requested, execute sequentially
        pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
        for current in pool.imap_unordered(run, generator):
            if current is not None:
                # display the error message from the static analyzer
                for line in current['error_output']:
                    logging.info(line.rstrip())
        pool.close()
        pool.join()


def govern_analyzer_runs(args):
    """ Governs multiple runs in CTU mode or runs once in normal mode. """

    ctu_config = get_ctu_config_from_args(args)
    # If we do a CTU collect (1st phase) we remove all previous collection
    # data first.
    if ctu_config.collect:
        shutil.rmtree(ctu_config.dir, ignore_errors=True)

    # If the user asked for a collect (1st) and analyze (2nd) phase, we do an
    # all-in-one run where we deliberately remove collection data before and
    # also after the run. If the user asks only for a single phase, the data
    # is left in place so multiple analyze runs can reuse the data gathered
    # by a single collection run.
    if ctu_config.collect and ctu_config.analyze:
        # The CTU directory and extdef map command come from args.ctu_dir and
        # args.extdef_map_cmd, so we can leave them empty here.
        args.ctu_phases = CtuConfig(collect=True, analyze=False,
                                    dir='', extdef_map_cmd='')
        run_analyzer_parallel(args)
        merge_ctu_extdef_maps(ctu_config.dir)
        args.ctu_phases = CtuConfig(collect=False, analyze=True,
                                    dir='', extdef_map_cmd='')
        run_analyzer_parallel(args)
        shutil.rmtree(ctu_config.dir, ignore_errors=True)
    else:
        # Single runs (collect or analyze) are launched from here.
        run_analyzer_parallel(args)
        if ctu_config.collect:
            merge_ctu_extdef_maps(ctu_config.dir)


def setup_environment(args):
    """ Set up environment for build command to interpose compiler wrapper. """

    environment = dict(os.environ)
    environment.update(wrapper_environment(args))
    environment.update({
        'CC': COMPILER_WRAPPER_CC,
        'CXX': COMPILER_WRAPPER_CXX,
        'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
        'ANALYZE_BUILD_REPORT_DIR': args.output,
        'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
        'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
        'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
        'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
        'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
    })
    return environment


@command_entry_point
def analyze_compiler_wrapper():
    """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """

    return compiler_wrapper(analyze_compiler_wrapper_impl)


def analyze_compiler_wrapper_impl(result, execution):
    """ Implements the analyzer compiler wrapper functionality. """

    # Don't run the analyzer when the compilation fails or when it's not
    # requested.
    if result or not os.getenv('ANALYZE_BUILD_CLANG'):
        return

    # Check whether the command is a compilation at all.
    compilation = split_command(execution.cmd)
    if compilation is None:
        return
    # Collect the needed parameters from the environment; crash when missing.
    parameters = {
        'clang': os.getenv('ANALYZE_BUILD_CLANG'),
        'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
        'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
        'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
        'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
                                 '').split(' '),
        'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
        'directory': execution.cwd,
        'command': [execution.cmd[0], '-c'] + compilation.flags,
        'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
    }
    # Call the static analyzer against the compilation.
    for source in compilation.files:
        parameters.update({'file': source})
        logging.debug('analyzer parameters %s', parameters)
        current = run(parameters)
        # display the error message from the static analyzer
        if current is not None:
            for line in current['error_output']:
                logging.info(line.rstrip())


@contextlib.contextmanager
def report_directory(hint, keep):
    """ Responsible for the report directory.

    hint -- specifies the parent directory of the output directory.
    keep -- a boolean value to keep or delete the empty report directory. """

    stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
    stamp = datetime.datetime.now().strftime(stamp_format)
    parent_dir = os.path.abspath(hint)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)

    logging.info('Report directory created: %s', name)

    try:
        yield name
    finally:
        if os.listdir(name):
            msg = "Run 'scan-view %s' to examine bug reports."
            keep = True
        else:
            if keep:
                msg = "Report directory '%s' contains no report, but is kept."
            else:
                msg = "Removing directory '%s' because it contains no report."
        logging.warning(msg, name)

        if not keep:
            os.rmdir(name)
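
# Illustrative example (comment only; the date and suffix are made up): with
# hint='./reports', the directory created above looks something like
#     ./reports/scan-build-2024-01-01-12-30-45-123456-AbCdEf
# where the trailing characters come from tempfile.mkdtemp().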


def analyzer_params(args):
    """ A group of command line arguments can be mapped to command line
    arguments of the analyzer. This method generates those. """

    result = []

    if args.store_model:
        result.append('-analyzer-store={0}'.format(args.store_model))
    if args.constraints_model:
        result.append('-analyzer-constraints={0}'.format(
            args.constraints_model))
    if args.internal_stats:
        result.append('-analyzer-stats')
    if args.analyze_headers:
        result.append('-analyzer-opt-analyze-headers')
    if args.stats:
        result.append('-analyzer-checker=debug.Stats')
    if args.maxloop:
        result.extend(['-analyzer-max-loop', str(args.maxloop)])
    if args.output_format:
        result.append('-analyzer-output={0}'.format(args.output_format))
    if args.analyzer_config:
        result.extend(['-analyzer-config', args.analyzer_config])
    if args.verbose >= 4:
        result.append('-analyzer-display-progress')
    if args.plugins:
        result.extend(prefix_with('-load', args.plugins))
    if args.enable_checker:
        checkers = ','.join(args.enable_checker)
        result.extend(['-analyzer-checker', checkers])
    if args.disable_checker:
        checkers = ','.join(args.disable_checker)
        result.extend(['-analyzer-disable-checker', checkers])

    return prefix_with('-Xclang', result)
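
# Illustrative example (comment only; the checker name is made up): the entries
# contributed by '--enable-checker alpha.security' end up in the result as
#     ['-Xclang', '-analyzer-checker', '-Xclang', 'alpha.security']
# because prefix_with('-Xclang', ...) interleaves the escape flag needed to
# forward these frontend options through the clang driver.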


def require(required):
    """ Decorator for checking the required values in state.

    It checks the required attributes in the passed state and stops when
    any of those is missing. """

    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            for key in required:
                if key not in args[0]:
                    raise KeyError('{0} not passed to {1}'.format(
                        key, function.__name__))

            return function(*args, **kwargs)

        return wrapper

    return decorator


@require(['command',  # entry from compilation database
          'directory',  # entry from compilation database
          'file',  # entry from compilation database
          'clang',  # clang executable name (and path)
          'direct_args',  # arguments from command line
          'force_debug',  # kill non debug macros
          'output_dir',  # where generated report files shall go
          'output_format',  # it's 'plist', 'html', both or plist-multi-file
          'output_failures',  # generate crash reports or not
          'ctu'])  # ctu control options
def run(opts):
    """ Entry point to run (or not) the static analyzer against a single
    entry of the compilation database.

    This complex task is decomposed into smaller methods which call each
    other in a chain. If the analysis is not possible, the given method just
    returns and breaks the chain.

    The passed parameter is a Python dictionary. Each method first checks
    that the needed parameters were received. (This is done by the 'require'
    decorator; it's like an 'assert' to check the contract between the
    caller and the called method.) """

    try:
        command = opts.pop('command')
        command = command if isinstance(command, list) else decode(command)
        logging.debug("Run analyzer against '%s'", command)
        opts.update(classify_parameters(command))

        return arch_check(opts)
    except Exception:
        logging.error("Problem occurred during analysis.", exc_info=1)
        return None
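
# For orientation (comment only): run() above kicks off a chain of the helpers
# defined below, each one handing 'opts' to the next via its 'continuation'
# parameter:
#     run -> arch_check -> language_check -> filter_debug_flags
#         -> dispatch_ctu -> run_analyzer (-> report_failure on failure)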


@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
          'error_output', 'exit_code'])
def report_failure(opts):
    """ Create a report when the analyzer failed.

    The major report is the preprocessor output. The output filename is
    generated randomly. The compiler output is also captured into a
    '.stderr.txt' file, and some more execution context is saved into an
    '.info.txt' file. """

    def extension():
        """ Generate preprocessor file extension. """

        mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
        return mapping.get(opts['language'], '.i')

    def destination():
        """ Creates the failures directory if it does not exist yet. """

        failures_dir = os.path.join(opts['output_dir'], 'failures')
        if not os.path.isdir(failures_dir):
            os.makedirs(failures_dir)
        return failures_dir

    # Classify the error type: when Clang terminated by a signal it's a
    # 'Crash'. (The Python subprocess Popen.returncode is negative when the
    # child terminated by a signal.) Everything else is 'Other Error'.
    error = 'crash' if opts['exit_code'] < 0 else 'other_error'
    # Create the preprocessor output file name. (This is blindly following
    # the Perl implementation.)
    (handle, name) = tempfile.mkstemp(suffix=extension(),
                                      prefix='clang_' + error + '_',
                                      dir=destination())
    os.close(handle)
    # Execute Clang again, but run the syntax check only.
    cwd = opts['directory']
    cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \
        [opts['file'], '-o', name]
    try:
        cmd = get_arguments(cmd, cwd)
        run_command(cmd, cwd=cwd)
    except subprocess.CalledProcessError:
        pass
    except ClangErrorException:
        pass
    # write general information about the crash
    with open(name + '.info.txt', 'w') as handle:
        handle.write(opts['file'] + os.linesep)
        handle.write(error.title().replace('_', ' ') + os.linesep)
        handle.write(' '.join(cmd) + os.linesep)
        handle.write(' '.join(os.uname()) + os.linesep)
        handle.write(get_version(opts['clang']))
        handle.close()
    # write the captured output too
    with open(name + '.stderr.txt', 'w') as handle:
        handle.writelines(opts['error_output'])
        handle.close()


@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
          'output_format'])
def run_analyzer(opts, continuation=report_failure):
    """ It assembles the analysis command line and executes it. Captures the
    output of the analysis and returns with it. If failure reports are
    requested, it calls the continuation to generate them. """

    def target():
        """ Creates the output file name for reports. """
        if opts['output_format'] in {
                'plist',
                'plist-html',
                'plist-multi-file'}:
            (handle, name) = tempfile.mkstemp(prefix='report-',
                                              suffix='.plist',
                                              dir=opts['output_dir'])
            os.close(handle)
            return name
        return opts['output_dir']

    try:
        cwd = opts['directory']
        cmd = get_arguments([opts['clang'], '--analyze'] +
                            opts['direct_args'] + opts['flags'] +
                            [opts['file'], '-o', target()],
                            cwd)
        output = run_command(cmd, cwd=cwd)
        return {'error_output': output, 'exit_code': 0}
    except subprocess.CalledProcessError as ex:
        result = {'error_output': ex.output, 'exit_code': ex.returncode}
        if opts.get('output_failures', False):
            opts.update(result)
            continuation(opts)
        return result
    except ClangErrorException as ex:
        result = {'error_output': ex.error, 'exit_code': 0}
        if opts.get('output_failures', False):
            opts.update(result)
            continuation(opts)
        return result
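
# Illustrative command shape (comment only; paths and flags are placeholders):
# conceptually, run_analyzer() executes something like
#     clang --analyze <direct_args> <flags> main.c -o <output-dir>/report-XXXXXX.plist
# from the entry's working directory; the exact argv is whatever
# get_arguments() returns for that driver invocation.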


def extdef_map_list_src_to_ast(extdef_src_list):
    """ Turns a textual external definition map list with source files into
    an external definition map list with AST files. """

    extdef_ast_list = []
    for extdef_src_txt in extdef_src_list:
        mangled_name, path = extdef_src_txt.split(" ", 1)
        # Normalize the path on Windows as well.
        path = os.path.splitdrive(path)[1]
        # Make a relative path out of an absolute one.
        path = path[1:] if path[0] == os.sep else path
        ast_path = os.path.join("ast", path + ".ast")
        extdef_ast_list.append(mangled_name + " " + ast_path)
    return extdef_ast_list
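
# Illustrative example (comment only, with a made-up mangled name and path):
#     '_Z3foov /home/user/project/foo.cpp'
# becomes
#     '_Z3foov ast/home/user/project/foo.cpp.ast'
# i.e. the absolute source path is re-rooted under the per-arch 'ast' folder.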


@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
    """ Preprocess the source by generating all data needed by CTU analysis. """

    def generate_ast(triple_arch):
        """ Generates ASTs for the current compilation command. """

        args = opts['direct_args'] + opts['flags']
        ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
                                       os.path.realpath(opts['file'])[1:] +
                                       '.ast')
        ast_path = os.path.abspath(ast_joined_path)
        ast_dir = os.path.dirname(ast_path)
        if not os.path.isdir(ast_dir):
            try:
                os.makedirs(ast_dir)
            except OSError:
                # In case another process has already created it.
                pass
        ast_command = [opts['clang'], '-emit-ast']
        ast_command.extend(args)
        ast_command.append('-w')
        ast_command.append(opts['file'])
        ast_command.append('-o')
        ast_command.append(ast_path)
        logging.debug("Generating AST using '%s'", ast_command)
        run_command(ast_command, cwd=opts['directory'])

    def map_extdefs(triple_arch):
        """ Generate the external definition map file for the current source. """

        args = opts['direct_args'] + opts['flags']
        extdefmap_command = [opts['ctu'].extdef_map_cmd]
        extdefmap_command.append(opts['file'])
        extdefmap_command.append('--')
        extdefmap_command.extend(args)
        logging.debug("Generating external definition map using '%s'",
                      extdefmap_command)
        extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
        extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
        extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
                                              CTU_TEMP_DEFMAP_FOLDER)
        if not os.path.isdir(extern_defs_map_folder):
            try:
                os.makedirs(extern_defs_map_folder)
            except OSError:
                # In case another process has already created it.
                pass
        if extdef_ast_list:
            with tempfile.NamedTemporaryFile(mode='w',
                                             dir=extern_defs_map_folder,
                                             delete=False) as out_file:
                out_file.write("\n".join(extdef_ast_list) + "\n")

    cwd = opts['directory']
    cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
        + [opts['file']]
    triple_arch = get_triple_arch(cmd, cwd)
    generate_ast(triple_arch)
    map_extdefs(triple_arch)


@require(['ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
    """ Execute only one of the two CTU phases if needed. """

    ctu_config = opts['ctu']

    if ctu_config.collect or ctu_config.analyze:
        assert ctu_config.collect != ctu_config.analyze
        if ctu_config.collect:
            return ctu_collect_phase(opts)
        if ctu_config.analyze:
            cwd = opts['directory']
            cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
                + opts['flags'] + [opts['file']]
            triarch = get_triple_arch(cmd, cwd)
            ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
                           'experimental-enable-naive-ctu-analysis=true']
            analyzer_options = prefix_with('-analyzer-config', ctu_options)
            direct_options = prefix_with('-Xanalyzer', analyzer_options)
            opts['direct_args'].extend(direct_options)

    return continuation(opts)


@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
    """ Filter out the non-debug macro (undefine NDEBUG) when requested. """

    if opts.pop('force_debug'):
        # a lazy implementation: just append an undefine macro at the end
        opts.update({'flags': opts['flags'] + ['-UNDEBUG']})

    return continuation(opts)


@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
    """ Find out the language from the command line parameters or the file
    name extension. The decision is also influenced by the compiler
    invocation. """

    accepted = frozenset({
        'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
        'c++-cpp-output', 'objective-c-cpp-output'
    })

    # the language can be given as a parameter...
    language = opts.pop('language')
    compiler = opts.pop('compiler')
    # ... or found out from the source file extension
    if language is None and compiler is not None:
        language = classify_source(opts['file'], compiler == 'c')

    if language is None:
        logging.debug('skip analysis, language not known')
        return None
    elif language not in accepted:
        logging.debug('skip analysis, language not supported')
        return None
    else:
        logging.debug('analysis, language: %s', language)
        opts.update({'language': language,
                     'flags': ['-x', language] + opts['flags']})
        return continuation(opts)


@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
    """ Run the analyzer for one of the given architectures only. """

    disabled = frozenset({'ppc', 'ppc64'})

    received_list = opts.pop('arch_list')
    if received_list:
        # filter out the disabled architectures
        filtered_list = [a for a in received_list if a not in disabled]
        if filtered_list:
            # There should be only one arch given (or the same one multiple
            # times). If multiple different archs are given, they should not
            # change the pre-processing step. (That's the only pass we have
            # before running the analyzer.)
            current = filtered_list.pop()
            logging.debug('analysis, on arch: %s', current)

            opts.update({'flags': ['-arch', current] + opts['flags']})
            return continuation(opts)
        else:
            logging.debug('skip analysis, found not supported arch')
            return None
    else:
        logging.debug('analysis, on default arch')
        return continuation(opts)


# To get good results from the static analyzer, certain compiler options shall
# be omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option names; values are the number of following options to
# skip.
IGNORED_FLAGS = {
    '-c': 0,  # compile option will be overwritten
    '-fsyntax-only': 0,  # static analyzer option will be overwritten
    '-o': 1,  # will set up its own output file
    # flags below are inherited from the perl implementation.
    '-g': 0,
    '-save-temps': 0,
    '-install_name': 1,
    '-exported_symbols_list': 1,
    '-current_version': 1,
    '-compatibility_version': 1,
    '-init': 1,
    '-e': 1,
    '-seg1addr': 1,
    '-bundle_loader': 1,
    '-multiply_defined': 1,
    '-sectorder': 3,
    '--param': 1,
    '--serialize-diagnostics': 1
}


def classify_parameters(command):
    """ Prepare the compiler flags (filter some and add others) and take out
    the language (-x) and architecture (-arch) flags for later processing. """

    result = {
        'flags': [],  # the filtered compiler flags
        'arch_list': [],  # list of architecture flags
        'language': None,  # compilation language, None if not specified
        'compiler': compiler_language(command)  # 'c' or 'c++'
    }

    # iterate over the compile options
    args = iter(command[1:])
    for arg in args:
        # take arch flags into a separate basket
        if arg == '-arch':
            result['arch_list'].append(next(args))
        # take the language
        elif arg == '-x':
            result['language'] = next(args)
        # parameters which look like source files are not flags
        elif re.match(r'^[^-].+', arg) and classify_source(arg):
            pass
        # ignore some flags
        elif arg in IGNORED_FLAGS:
            count = IGNORED_FLAGS[arg]
            for _ in range(count):
                next(args)
        # we don't care about extra warnings, but we should suppress the ones
        # that we don't want to see.
        elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
            pass
        # and consider everything else a compilation flag.
        else:
            result['flags'].append(arg)

    return result
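
# Illustrative example (comment only; the behaviour of classify_source() and
# compiler_language() is assumed from their names):
#     classify_parameters(['cc', '-c', '-x', 'c', '-arch', 'x86_64',
#                          '-Wall', '-O2', 'main.c', '-o', 'main.o'])
# would yield roughly
#     {'flags': ['-O2'], 'arch_list': ['x86_64'],
#      'language': 'c', 'compiler': 'c'}
# i.e. '-c', '-o main.o' and '-Wall' are dropped, the source file is skipped,
# and the '-x'/'-arch' values are captured separately.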