1#!/usr/bin/env python3
2# pylint: skip-file
3#
4# Copyright (c) 2009 Google Inc. All rights reserved.
5#
6# Redistribution and use in source and binary forms, with or without
7# modification, are permitted provided that the following conditions are
8# met:
9#
10#    * Redistributions of source code must retain the above copyright
11# notice, this list of conditions and the following disclaimer.
12#    * Redistributions in binary form must reproduce the above
13# copyright notice, this list of conditions and the following disclaimer
14# in the documentation and/or other materials provided with the
15# distribution.
16#    * Neither the name of Google Inc. nor the names of its
17# contributors may be used to endorse or promote products derived from
18# this software without specific prior written permission.
19#
20# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32"""Does google-lint on c++ files.
33
34The goal of this script is to identify places in the code that *may*
35be in non-compliance with Google style.  It does not attempt to fix
36up these problems -- the point is to educate.  It also does not
37attempt to find all problems, or to ensure that everything it does
38find is legitimately a problem.
39
40In particular, we can get very confused by /* and // inside strings!
41We do a small hack, which is to ignore //'s with "'s after them on the
42same line, but it is far from perfect (in either direction).
43"""
44
45import codecs
46import copy
47import getopt
48import glob
49import itertools
50import math  # for log
51import os
52import re
53import sre_compile
54import string
55import sys
56import sysconfig
57import unicodedata
58import xml.etree.ElementTree
59
60# if empty, use defaults
61_valid_extensions = set([])
62
63__VERSION__ = '1.5.5'
64
65try:
66  xrange          # Python 2
67except NameError:
68  #  -- pylint: disable=redefined-builtin
69  xrange = range  # Python 3
70
71
72_USAGE = """
73Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
74                   [--filter=-x,+y,...]
75                   [--counting=total|toplevel|detailed] [--root=subdir]
76                   [--repository=path]
77                   [--linelength=digits] [--headers=x,y,...]
78                   [--recursive]
79                   [--exclude=path]
80                   [--extensions=hpp,cpp,...]
81                   [--includeorder=default|standardcfirst]
82                   [--quiet]
83                   [--version]
84        <file> [file] ...
85
86  Style checker for C/C++ source files.
87  This is a fork of the Google style checker with minor extensions.
88
89  The style guidelines this tries to follow are those in
90    https://google.github.io/styleguide/cppguide.html
91
92  Every problem is given a confidence score from 1-5, with 5 meaning we are
93  certain of the problem, and 1 meaning it could be a legitimate construct.
94  This will miss some errors, and is not a substitute for a code review.
95
96  To suppress false-positive errors of a certain category, add a
97  'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
98  suppresses errors of all categories on that line.
99
100  The files passed in will be linted; at least one file must be provided.
101  Default linted extensions are %s.
102  Other file types will be ignored.
103  Change the extensions with the --extensions flag.
104
105  Flags:
106
107    output=emacs|eclipse|vs7|junit|sed|gsed
108      By default, the output is formatted to ease emacs parsing.  Visual Studio
109      compatible output (vs7) may also be used.  Further support exists for
110      eclipse (eclipse), and JUnit (junit). XML parsers such as those used
111      in Jenkins and Bamboo may also be used.
112      The sed format outputs sed commands that should fix some of the errors.
113      Note that this requires GNU sed. If it is installed as gsed on your
114      system (common e.g. on macOS with Homebrew), you can use the gsed output
115      format. Sed commands are written to stdout, not stderr, so you should be
116      able to pipe output straight to a shell to run the fixes.
117
118    verbose=#
119      Specify a number 0-5 to restrict errors to certain verbosity levels.
120      Errors with lower verbosity levels have lower confidence and are more
121      likely to be false positives.
122
123    quiet
124      Don't print anything if no errors are found.
125
126    filter=-x,+y,...
127      Specify a comma-separated list of category-filters to apply: only
128      error messages whose category names pass the filters will be printed.
129      (Category names are printed with the message and look like
130      "[whitespace/indent]".)  Filters are evaluated left to right.
131      "-FOO" means "do not print categories that start with FOO".
132      "+FOO" means "do print categories that start with FOO".
133
134      Examples: --filter=-whitespace,+whitespace/braces
135                --filter=-whitespace,-runtime/printf,+runtime/printf_format
136                --filter=-,+build/include_what_you_use
137
138      To see a list of all the categories used in cpplint, pass no arg:
139         --filter=
140
141    counting=total|toplevel|detailed
142      The total number of errors found is always printed. If
143      'toplevel' is provided, then the count of errors in each of
144      the top-level categories like 'build' and 'whitespace' will
145      also be printed. If 'detailed' is provided, then a count
146      is provided for each category like 'build/class'.
147
148    repository=path
149      The top level directory of the repository, used to derive the header
150      guard CPP variable. By default, this is determined by searching for a
151      path that contains .git, .hg, or .svn. When this flag is specified, the
152      given path is used instead. This option allows the header guard CPP
153      variable to remain consistent even if members of a team have different
154      repository root directories (such as when checking out a subdirectory
155      with SVN). In addition, users of non-mainstream version control systems
156      can use this flag to ensure readable header guard CPP variables.
157
158      Examples:
159        Assuming that Alice checks out ProjectName and Bob checks out
160        ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
161        with no --repository flag, the header guard CPP variable will be:
162
163        Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
164        Bob   => SRC_CHROME_BROWSER_UI_BROWSER_H_
165
166        If Alice uses the --repository=trunk flag and Bob omits the flag or
167        uses --repository=. then the header guard CPP variable will be:
168
169        Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
170        Bob   => SRC_CHROME_BROWSER_UI_BROWSER_H_
171
172    root=subdir
173      The root directory used for deriving header guard CPP variable.
174      This directory is relative to the top level directory of the repository
175      which by default is determined by searching for a directory that contains
176      .git, .hg, or .svn but can also be controlled with the --repository flag.
177      If the specified directory does not exist, this flag is ignored.
178
179      Examples:
180        Assuming that src is the top level directory of the repository (and
181        cwd=top/src), the header guard CPP variables for
182        src/chrome/browser/ui/browser.h are:
183
184        No flag => CHROME_BROWSER_UI_BROWSER_H_
185        --root=chrome => BROWSER_UI_BROWSER_H_
186        --root=chrome/browser => UI_BROWSER_H_
187        --root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_
188
189    linelength=digits
190      This is the allowed line length for the project. The default value is
191      80 characters.
192
193      Examples:
194        --linelength=120
195
196    recursive
197      Search for files to lint recursively. Each directory given in the list
198      of files to be linted is replaced by all files that descend from that
199      directory. Files with extensions not in the valid extensions list are
200      excluded.
201
202    exclude=path
203      Exclude the given path from the list of files to be linted. Relative
204      paths are evaluated relative to the current directory and shell globbing
205      is performed. This flag can be provided multiple times to exclude
206      multiple files.
207
208      Examples:
209        --exclude=one.cc
210        --exclude=src/*.cc
211        --exclude=src/*.cc --exclude=test/*.cc
212
213    extensions=extension,extension,...
214      The allowed file extensions that cpplint will check
215
216      Examples:
217        --extensions=%s
218
219    includeorder=default|standardcfirst
220      For the build/include_order rule, the default is to blindly assume angle
221      bracket includes with a file extension are C system headers (default),
222      even though this is known to produce false classifications.
223      This default matches the convention established at Google.
224      standardcfirst means to instead use an allow-list of known C headers and
225      treat all others as a separate group of "other system headers". The C
226      headers included are those of the C standard library and closely related ones.
227
228    headers=x,y,...
229      The header extensions that cpplint will treat as .h in checks. Values are
230      automatically added to the --extensions list.
231      (by default, only files with extensions %s will be assumed to be headers)
232
233      Examples:
234        --headers=%s
235        --headers=hpp,hxx
236        --headers=hpp
237
238    cpplint.py supports per-directory configurations specified in CPPLINT.cfg
239    files. A CPPLINT.cfg file can contain a number of key=value pairs.
240    Currently the following options are supported:
241
242      set noparent
243      filter=+filter1,-filter2,...
244      exclude_files=regex
245      linelength=80
246      root=subdir
247      headers=x,y,...
248
249    The "set noparent" option prevents cpplint from traversing the directory
250    tree upwards looking for more .cfg files in parent directories. This option
251    is usually placed in the top-level project directory.
252
253    The "filter" option is similar in function to the --filter flag. It specifies
254    message filters in addition to the |_DEFAULT_FILTERS| and those specified
255    through the --filter command-line flag.
256
257    "exclude_files" allows you to specify a regular expression to be matched against
258    a file name. If the expression matches, the file is skipped and not run
259    through the linter.
260
261    "linelength" allows you to specify the allowed line length for the project.
262
263    The "root" option is similar in function to the --root flag (see example
264    above). Paths are relative to the directory of the CPPLINT.cfg.
265
266    The "headers" option is similar in function to the --headers flag
267    (see example above).
268
269    CPPLINT.cfg has an effect on files in the same directory and all
270    sub-directories, unless overridden by a nested configuration file.
271
272      Example file:
273        filter=-build/include_order,+build/include_alpha
274        exclude_files=.*\\.cc
275
276    The above example disables the build/include_order warning, enables
277    build/include_alpha, and excludes all .cc files from being
278    processed by the linter, in the current directory (where the .cfg
279    file is located) and all sub-directories.
280"""
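# Illustrative sketch (not part of cpplint itself) of the NOLINT suppressions
# described in _USAGE above, as they would appear in the C++ code under review:
#
#   int port = atoi(value);  // NOLINT(runtime/int)  suppress one category here
#   int port = atoi(value);  // NOLINT               suppress every category here
#   // NOLINTNEXTLINE(whitespace/line_length)        suppress on the next line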
281
282# We categorize each error message we print.  Here are the categories.
283# We want an explicit list so we can list them all in cpplint --filter=.
284# If you add a new error message with a new category, add it to the list
285# here!  cpplint_unittest.py should tell you if you forget to do this.
286_ERROR_CATEGORIES = [
287    'build/class',
288    'build/c++11',
289    'build/c++14',
290    'build/c++tr1',
291    'build/deprecated',
292    'build/endif_comment',
293    'build/explicit_make_pair',
294    'build/forward_decl',
295    'build/header_guard',
296    'build/include',
297    'build/include_subdir',
298    'build/include_alpha',
299    'build/include_order',
300    'build/include_what_you_use',
301    'build/namespaces_headers',
302    'build/namespaces_literals',
303    'build/namespaces',
304    'build/printf_format',
305    'build/storage_class',
306    'legal/copyright',
307    'readability/alt_tokens',
308    'readability/braces',
309    'readability/casting',
310    'readability/check',
311    'readability/constructors',
312    'readability/fn_size',
313    'readability/inheritance',
314    'readability/multiline_comment',
315    'readability/multiline_string',
316    'readability/namespace',
317    'readability/nolint',
318    'readability/nul',
319    'readability/strings',
320    'readability/todo',
321    'readability/utf8',
322    'runtime/arrays',
323    'runtime/casting',
324    'runtime/explicit',
325    'runtime/int',
326    'runtime/init',
327    'runtime/invalid_increment',
328    'runtime/member_string_references',
329    'runtime/memset',
330    'runtime/indentation_namespace',
331    'runtime/operator',
332    'runtime/printf',
333    'runtime/printf_format',
334    'runtime/references',
335    'runtime/string',
336    'runtime/threadsafe_fn',
337    'runtime/vlog',
338    'whitespace/blank_line',
339    'whitespace/braces',
340    'whitespace/comma',
341    'whitespace/comments',
342    'whitespace/empty_conditional_body',
343    'whitespace/empty_if_body',
344    'whitespace/empty_loop_body',
345    'whitespace/end_of_line',
346    'whitespace/ending_newline',
347    'whitespace/forcolon',
348    'whitespace/indent',
349    'whitespace/line_length',
350    'whitespace/newline',
351    'whitespace/operators',
352    'whitespace/parens',
353    'whitespace/semicolon',
354    'whitespace/tab',
355    'whitespace/todo',
356    ]
357
358# Keywords to use with --output that produce stdout output for machine processing
359_MACHINE_OUTPUTS = [
360  'junit',
361  'sed',
362  'gsed'
363]
364
365# These error categories are no longer enforced by cpplint, but for backwards-
366# compatibility they may still appear in NOLINT comments.
367_LEGACY_ERROR_CATEGORIES = [
368    'readability/streams',
369    'readability/function',
370    ]
371
372# The default state of the category filter. This is overridden by the --filter=
373# flag. By default all errors are on, so only add here categories that should be
374# off by default (i.e., categories that must be enabled by the --filter= flags).
375# All entries here should start with a '-' or '+', as in the --filter= flag.
376_DEFAULT_FILTERS = ['-build/include_alpha']
377
378# The default list of categories suppressed for C (not C++) files.
379_DEFAULT_C_SUPPRESSED_CATEGORIES = [
380    'readability/casting',
381    ]
382
383# The default list of categories suppressed for Linux Kernel files.
384_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
385    'whitespace/tab',
386    ]
387
388# We used to check for high-bit characters, but after much discussion we
389# decided those were OK, as long as they were in UTF-8 and didn't represent
390# hard-coded international strings, which belong in a separate i18n file.
391
392# C++ headers
393_CPP_HEADERS = frozenset([
394    # Legacy
395    'algobase.h',
396    'algo.h',
397    'alloc.h',
398    'builtinbuf.h',
399    'bvector.h',
400    'complex.h',
401    'defalloc.h',
402    'deque.h',
403    'editbuf.h',
404    'fstream.h',
405    'function.h',
406    'hash_map',
407    'hash_map.h',
408    'hash_set',
409    'hash_set.h',
410    'hashtable.h',
411    'heap.h',
412    'indstream.h',
413    'iomanip.h',
414    'iostream.h',
415    'istream.h',
416    'iterator.h',
417    'list.h',
418    'map.h',
419    'multimap.h',
420    'multiset.h',
421    'ostream.h',
422    'pair.h',
423    'parsestream.h',
424    'pfstream.h',
425    'procbuf.h',
426    'pthread_alloc',
427    'pthread_alloc.h',
428    'rope',
429    'rope.h',
430    'ropeimpl.h',
431    'set.h',
432    'slist',
433    'slist.h',
434    'stack.h',
435    'stdiostream.h',
436    'stl_alloc.h',
437    'stl_relops.h',
438    'streambuf.h',
439    'stream.h',
440    'strfile.h',
441    'strstream.h',
442    'tempbuf.h',
443    'tree.h',
444    'type_traits.h',
445    'vector.h',
446    # 17.6.1.2 C++ library headers
447    'algorithm',
448    'array',
449    'atomic',
450    'bitset',
451    'chrono',
452    'codecvt',
453    'complex',
454    'condition_variable',
455    'deque',
456    'exception',
457    'forward_list',
458    'fstream',
459    'functional',
460    'future',
461    'initializer_list',
462    'iomanip',
463    'ios',
464    'iosfwd',
465    'iostream',
466    'istream',
467    'iterator',
468    'limits',
469    'list',
470    'locale',
471    'map',
472    'memory',
473    'mutex',
474    'new',
475    'numeric',
476    'ostream',
477    'queue',
478    'random',
479    'ratio',
480    'regex',
481    'scoped_allocator',
482    'set',
483    'sstream',
484    'stack',
485    'stdexcept',
486    'streambuf',
487    'string',
488    'strstream',
489    'system_error',
490    'thread',
491    'tuple',
492    'typeindex',
493    'typeinfo',
494    'type_traits',
495    'unordered_map',
496    'unordered_set',
497    'utility',
498    'valarray',
499    'vector',
500    # 17.6.1.2 C++14 headers
501    'shared_mutex',
502    # 17.6.1.2 C++17 headers
503    'any',
504    'charconv',
505    'codecvt',
506    'execution',
507    'filesystem',
508    'memory_resource',
509    'optional',
510    'string_view',
511    'variant',
512    # 17.6.1.2 C++ headers for C library facilities
513    'cassert',
514    'ccomplex',
515    'cctype',
516    'cerrno',
517    'cfenv',
518    'cfloat',
519    'cinttypes',
520    'ciso646',
521    'climits',
522    'clocale',
523    'cmath',
524    'csetjmp',
525    'csignal',
526    'cstdalign',
527    'cstdarg',
528    'cstdbool',
529    'cstddef',
530    'cstdint',
531    'cstdio',
532    'cstdlib',
533    'cstring',
534    'ctgmath',
535    'ctime',
536    'cuchar',
537    'cwchar',
538    'cwctype',
539    ])
540
541# C headers
542_C_HEADERS = frozenset([
543    # System C headers
544    'assert.h',
545    'complex.h',
546    'ctype.h',
547    'errno.h',
548    'fenv.h',
549    'float.h',
550    'inttypes.h',
551    'iso646.h',
552    'limits.h',
553    'locale.h',
554    'math.h',
555    'setjmp.h',
556    'signal.h',
557    'stdalign.h',
558    'stdarg.h',
559    'stdatomic.h',
560    'stdbool.h',
561    'stddef.h',
562    'stdint.h',
563    'stdio.h',
564    'stdlib.h',
565    'stdnoreturn.h',
566    'string.h',
567    'tgmath.h',
568    'threads.h',
569    'time.h',
570    'uchar.h',
571    'wchar.h',
572    'wctype.h',
573    # additional POSIX C headers
574    'aio.h',
575    'arpa/inet.h',
576    'cpio.h',
577    'dirent.h',
578    'dlfcn.h',
579    'fcntl.h',
580    'fmtmsg.h',
581    'fnmatch.h',
582    'ftw.h',
583    'glob.h',
584    'grp.h',
585    'iconv.h',
586    'langinfo.h',
587    'libgen.h',
588    'monetary.h',
589    'mqueue.h',
590    'ndbm.h',
591    'net/if.h',
592    'netdb.h',
593    'netinet/in.h',
594    'netinet/tcp.h',
595    'nl_types.h',
596    'poll.h',
597    'pthread.h',
598    'pwd.h',
599    'regex.h',
600    'sched.h',
601    'search.h',
602    'semaphore.h',
603    'setjmp.h',
604    'signal.h',
605    'spawn.h',
606    'strings.h',
607    'stropts.h',
608    'syslog.h',
609    'tar.h',
610    'termios.h',
611    'trace.h',
612    'ulimit.h',
613    'unistd.h',
614    'utime.h',
615    'utmpx.h',
616    'wordexp.h',
617    # additional GNUlib headers
618    'a.out.h',
619    'aliases.h',
620    'alloca.h',
621    'ar.h',
622    'argp.h',
623    'argz.h',
624    'byteswap.h',
625    'crypt.h',
626    'endian.h',
627    'envz.h',
628    'err.h',
629    'error.h',
630    'execinfo.h',
631    'fpu_control.h',
632    'fstab.h',
633    'fts.h',
634    'getopt.h',
635    'gshadow.h',
636    'ieee754.h',
637    'ifaddrs.h',
638    'libintl.h',
639    'mcheck.h',
640    'mntent.h',
641    'obstack.h',
642    'paths.h',
643    'printf.h',
644    'pty.h',
645    'resolv.h',
646    'shadow.h',
647    'sysexits.h',
648    'ttyent.h',
649    # Additional linux glibc headers
650    'dlfcn.h',
651    'elf.h',
652    'features.h',
653    'gconv.h',
654    'gnu-versions.h',
655    'lastlog.h',
656    'libio.h',
657    'link.h',
658    'malloc.h',
659    'memory.h',
660    'netash/ash.h',
661    'netatalk/at.h',
662    'netax25/ax25.h',
663    'neteconet/ec.h',
664    'netipx/ipx.h',
665    'netiucv/iucv.h',
666    'netpacket/packet.h',
667    'netrom/netrom.h',
668    'netrose/rose.h',
669    'nfs/nfs.h',
670    'nl_types.h',
671    'nss.h',
672    're_comp.h',
673    'regexp.h',
674    'sched.h',
675    'sgtty.h',
676    'stab.h',
677    'stdc-predef.h',
678    'stdio_ext.h',
679    'syscall.h',
680    'termio.h',
681    'thread_db.h',
682    'ucontext.h',
683    'ustat.h',
684    'utmp.h',
685    'values.h',
686    'wait.h',
687    'xlocale.h',
688    # Hardware specific headers
689    'arm_neon.h',
690    'emmintrin.h',
691    'xmmintrin.h',
692    ])
693
694# Folders of C libraries so commonly used in C++
695# that they are treated on par with the standard C library headers.
696C_STANDARD_HEADER_FOLDERS = frozenset([
697    # standard C library
698    "sys",
699    # glibc for linux
700    "arpa",
701    "asm-generic",
702    "bits",
703    "gnu",
704    "net",
705    "netinet",
706    "protocols",
707    "rpc",
708    "rpcsvc",
709    "scsi",
710    # linux kernel header
711    "drm",
712    "linux",
713    "misc",
714    "mtd",
715    "rdma",
716    "sound",
717    "video",
718    "xen",
719  ])
720
721# Type names
722_TYPES = re.compile(
723    r'^(?:'
724    # [dcl.type.simple]
725    r'(char(16_t|32_t)?)|wchar_t|'
726    r'bool|short|int|long|signed|unsigned|float|double|'
727    # [support.types]
728    r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
729    # [cstdint.syn]
730    r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
731    r'(u?int(max|ptr)_t)|'
732    r')$')
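# Illustration (assumed inputs, derived from the pattern above): _TYPES matches
# complete builtin/typedef names only, e.g.
#   _TYPES.match('uint32_t')     # -> match ([cstdint.syn] branch)
#   _TYPES.match('size_t')       # -> match ([support.types] branch)
#   _TYPES.match('std::string')  # -> None (not a builtin type name)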
733
734
735# These headers are excluded from [build/include] and [build/include_order]
736# checks:
737# - Anything not following Google file name conventions (containing an
738#   uppercase character, such as Python.h or nsStringAPI.h, for example).
739# - Lua headers.
740_THIRD_PARTY_HEADERS_PATTERN = re.compile(
741    r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
742
743# Pattern for matching FileInfo.BaseName() against test file name
744_test_suffixes = ['_test', '_regtest', '_unittest']
745_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
746
747# Pattern that matches only complete whitespace, possibly across multiple lines.
748_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
749
750# Assertion macros.  These are defined in base/logging.h and
751# testing/base/public/gunit.h.
752_CHECK_MACROS = [
753    'DCHECK', 'CHECK',
754    'EXPECT_TRUE', 'ASSERT_TRUE',
755    'EXPECT_FALSE', 'ASSERT_FALSE',
756    ]
757
758# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
759_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
760
761for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
762                        ('>=', 'GE'), ('>', 'GT'),
763                        ('<=', 'LE'), ('<', 'LT')]:
764  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
765  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
766  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
767  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
768
769for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
770                            ('>=', 'LT'), ('>', 'LE'),
771                            ('<=', 'GT'), ('<', 'GE')]:
772  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
773  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
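# Sketch of the resulting table (illustrative entries):
#   _CHECK_REPLACEMENT['CHECK']['==']        == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_TRUE']['<']   == 'EXPECT_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'
# i.e. CHECK(a == b) maps to the suggested CHECK_EQ(a, b), and the *_FALSE
# macros map through the inverted comparison.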
774
775# Alternative tokens and their replacements.  For full list, see section 2.5
776# Alternative tokens [lex.digraph] in the C++ standard.
777#
778# Digraphs (such as '%:') are not included here since it's a mess to
779# match those on a word boundary.
780_ALT_TOKEN_REPLACEMENT = {
781    'and': '&&',
782    'bitor': '|',
783    'or': '||',
784    'xor': '^',
785    'compl': '~',
786    'bitand': '&',
787    'and_eq': '&=',
788    'or_eq': '|=',
789    'xor_eq': '^=',
790    'not': '!',
791    'not_eq': '!='
792    }
793
794# Compile regular expression that matches all the above keywords.  The "[ =()]"
795# bit is meant to avoid matching these keywords outside of boolean expressions.
796#
797# False positives include C-style multi-line comments and multi-line strings
798# but those have always been troublesome for cpplint.
799_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
800    r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
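# Illustrative example (assumed input): on a line such as
#   if (foo and not bar) {
# the pattern above matches the ' and' and ' not' occurrences, and
# _ALT_TOKEN_REPLACEMENT maps the captured keywords to the suggested
# spellings '&&' and '!'.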
801
802
803# These constants define types of headers for use with
804# _IncludeState.CheckNextIncludeOrder().
805_C_SYS_HEADER = 1
806_CPP_SYS_HEADER = 2
807_OTHER_SYS_HEADER = 3
808_LIKELY_MY_HEADER = 4
809_POSSIBLE_MY_HEADER = 5
810_OTHER_HEADER = 6
811
812# These constants define the current inline assembly state
813_NO_ASM = 0       # Outside of inline assembly block
814_INSIDE_ASM = 1   # Inside inline assembly block
815_END_ASM = 2      # Last line of inline assembly block
816_BLOCK_ASM = 3    # The whole block is an inline assembly block
817
818# Match start of assembly blocks
819_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
820                        r'(?:\s+(volatile|__volatile__))?'
821                        r'\s*[{(]')
822
823# Match strings that indicate we're working on a C (not C++) file.
824_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
825                            r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
826
827# Match string that indicates we're working on a Linux Kernel file.
828_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
829
830# Commands for sed to fix the problem
831_SED_FIXUPS = {
832  'Remove spaces around =': r's/ = /=/',
833  'Remove spaces around !=': r's/ != /!=/',
834  'Remove space before ( in if (': r's/if (/if(/',
835  'Remove space before ( in for (': r's/for (/for(/',
836  'Remove space before ( in while (': r's/while (/while(/',
837  'Remove space before ( in switch (': r's/switch (/switch(/',
838  'Should have a space between // and comment': r's/\/\//\/\/ /',
839  'Missing space before {': r's/\([^ ]\){/\1 {/',
840  'Tab found, replace by spaces': r's/\t/  /g',
841  'Line ends in whitespace.  Consider deleting these extra spaces.': r's/\s*$//',
842  'You don\'t need a ; after a }': r's/};/}/',
843  'Missing space after ,': r's/,\([^ ]\)/, \1/g',
844}
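# For illustration (assumed manual usage): each value above is a plain sed
# substitution, so the 'Tab found, replace by spaces' fixup corresponds to
# running something like
#   sed -e 's/\t/  /g' some_file.cc
# by hand; the sed/gsed output formats emit commands built from these
# substitutions (see _USAGE above).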
845
846_regexp_compile_cache = {}
847
848# {str, set(int)}: a map from error categories to sets of linenumbers
849# on which those errors are expected and should be suppressed.
850_error_suppressions = {}
851
852# The root directory used for deriving header guard CPP variable.
853# This is set by --root flag.
854_root = None
855_root_debug = False
856
857# The top level repository directory. If set, _root is calculated relative to
858# this directory instead of the directory containing version control artifacts.
859# This is set by the --repository flag.
860_repository = None
861
862# Files to exclude from linting. This is set by the --exclude flag.
863_excludes = None
864
865# Whether to suppress all PrintInfo messages, UNRELATED to the --quiet flag
866_quiet = False
867
868# The allowed line length of files.
869# This is set by --linelength flag.
870_line_length = 80
871
872# This allows using a different include order rule than the default
873_include_order = "default"
874
875try:
876  unicode
877except NameError:
878  #  -- pylint: disable=redefined-builtin
879  basestring = unicode = str
880
881try:
882  long
883except NameError:
884  #  -- pylint: disable=redefined-builtin
885  long = int
886
887if sys.version_info < (3,):
888  #  -- pylint: disable=no-member
889  # BINARY_TYPE = str
890  itervalues = dict.itervalues
891  iteritems = dict.iteritems
892else:
893  # BINARY_TYPE = bytes
894  itervalues = dict.values
895  iteritems = dict.items
896
897def unicode_escape_decode(x):
898  if sys.version_info < (3,):
899    return codecs.unicode_escape_decode(x)[0]
900  else:
901    return x
902
903# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
904# This is set by --headers flag.
905_hpp_headers = set([])
906
907# {str, bool}: a map from error categories to booleans which indicate if the
908# category should be suppressed for every line.
909_global_error_suppressions = {}
910
911def ProcessHppHeadersOption(val):
912  global _hpp_headers
913  try:
914    _hpp_headers = {ext.strip() for ext in val.split(',')}
915  except ValueError:
916    PrintUsage('Header extensions must be a comma-separated list.')
917
918def ProcessIncludeOrderOption(val):
919  if val is None or val == "default":
920    pass
921  elif val == "standardcfirst":
922    global _include_order
923    _include_order = val
924  else:
925    PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst' % val)
926
927def IsHeaderExtension(file_extension):
928  return file_extension in GetHeaderExtensions()
929
930def GetHeaderExtensions():
931  if _hpp_headers:
932    return _hpp_headers
933  if _valid_extensions:
934    return {h for h in _valid_extensions if 'h' in h}
935  return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'])
936
937# The allowed extensions for file names
938# This is set by --extensions flag
939def GetAllExtensions():
940  return GetHeaderExtensions().union(_valid_extensions or set(
941    ['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
942
943def ProcessExtensionsOption(val):
944  global _valid_extensions
945  try:
946    extensions = [ext.strip() for ext in val.split(',')]
947    _valid_extensions = set(extensions)
948  except ValueError:
949    PrintUsage('Extensions should be a comma-separated list of values; '
950               'for example: extensions=hpp,cpp\n'
951               'This could not be parsed: "%s"' % (val,))
952
953def GetNonHeaderExtensions():
954  return GetAllExtensions().difference(GetHeaderExtensions())
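# Illustrative defaults derived from the helpers above (assuming neither
# --headers nor --extensions was given):
#   GetHeaderExtensions()    -> {'h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'}
#   GetNonHeaderExtensions() -> {'c', 'cc', 'cpp', 'cxx', 'c++', 'cu'}
#   GetAllExtensions()       -> the union of the two sets above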
955
956def ParseNolintSuppressions(filename, raw_line, linenum, error):
957  """Updates the global list of line error-suppressions.
958
959  Parses any NOLINT comments on the current line, updating the global
960  error_suppressions store.  Reports an error if the NOLINT comment
961  was malformed.
962
963  Args:
964    filename: str, the name of the input file.
965    raw_line: str, the line of input text, with comments.
966    linenum: int, the number of the current line.
967    error: function, an error handler.
968  """
969  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
970  if matched:
971    if matched.group(1):
972      suppressed_line = linenum + 1
973    else:
974      suppressed_line = linenum
975    category = matched.group(2)
976    if category in (None, '(*)'):  # => "suppress all"
977      _error_suppressions.setdefault(None, set()).add(suppressed_line)
978    else:
979      if category.startswith('(') and category.endswith(')'):
980        category = category[1:-1]
981        if category in _ERROR_CATEGORIES:
982          _error_suppressions.setdefault(category, set()).add(suppressed_line)
983        elif category not in _LEGACY_ERROR_CATEGORIES:
984          error(filename, linenum, 'readability/nolint', 5,
985                'Unknown NOLINT error category: %s' % category)
986
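# Sketch of the resulting suppressions (illustrative file and line numbers):
# if line 10 of a file reads 'int x;  // NOLINT(runtime/int)' and line 20 reads
# '// NOLINTNEXTLINE(whitespace/tab)', then after parsing both lines
# _error_suppressions contains {'runtime/int': {10}, 'whitespace/tab': {21}}.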
987
988def ProcessGlobalSuppresions(lines):
989  """Updates the list of global error suppressions.
990
991  Parses any lint directives in the file that have global effect.
992
993  Args:
994    lines: An array of strings, each representing a line of the file, with the
995           last element being empty if the file is terminated with a newline.
996  """
997  for line in lines:
998    if _SEARCH_C_FILE.search(line):
999      for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
1000        _global_error_suppressions[category] = True
1001    if _SEARCH_KERNEL_FILE.search(line):
1002      for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
1003        _global_error_suppressions[category] = True
1004
1005
1006def ResetNolintSuppressions():
1007  """Resets the set of NOLINT suppressions to empty."""
1008  _error_suppressions.clear()
1009  _global_error_suppressions.clear()
1010
1011
1012def IsErrorSuppressedByNolint(category, linenum):
1013  """Returns true if the specified error category is suppressed on this line.
1014
1015  Consults the global error_suppressions map populated by
1016  ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
1017
1018  Args:
1019    category: str, the category of the error.
1020    linenum: int, the current line number.
1021  Returns:
1022    bool, True iff the error should be suppressed due to a NOLINT comment or
1023    global suppression.
1024  """
1025  return (_global_error_suppressions.get(category, False) or
1026          linenum in _error_suppressions.get(category, set()) or
1027          linenum in _error_suppressions.get(None, set()))
1028
1029
1030def Match(pattern, s):
1031  """Matches the string with the pattern, caching the compiled regexp."""
1032  # The regexp compilation caching is inlined in both Match and Search for
1033  # performance reasons; factoring it out into a separate function turns out
1034  # to be noticeably expensive.
1035  if pattern not in _regexp_compile_cache:
1036    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
1037  return _regexp_compile_cache[pattern].match(s)
1038
1039
1040def ReplaceAll(pattern, rep, s):
1041  """Replaces instances of pattern in a string with a replacement.
1042
1043  The compiled regex is kept in a cache shared by Match and Search.
1044
1045  Args:
1046    pattern: regex pattern
1047    rep: replacement text
1048    s: search string
1049
1050  Returns:
1051    string with replacements made (or original string if no replacements)
1052  """
1053  if pattern not in _regexp_compile_cache:
1054    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
1055  return _regexp_compile_cache[pattern].sub(rep, s)
1056
1057
1058def Search(pattern, s):
1059  """Searches the string for the pattern, caching the compiled regexp."""
1060  if pattern not in _regexp_compile_cache:
1061    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
1062  return _regexp_compile_cache[pattern].search(s)
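# Usage sketch for the cached regex helpers above (illustrative values):
#   Match(r'^\s*#\s*include\b', '  #include <vector>')  # -> match object
#   Search(r'\bNOLINT\b', 'int x;  // NOLINT')           # -> match object
#   ReplaceAll(r'\s+$', '', 'int x;   ')                 # -> 'int x;'
# Each distinct pattern string is compiled once and reused from
# _regexp_compile_cache on subsequent calls.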
1063
1064
1065def _IsSourceExtension(s):
1066  """File extension (excluding dot) matches a source file extension."""
1067  return s in GetNonHeaderExtensions()
1068
1069
1070class _IncludeState(object):
1071  """Tracks line numbers for includes, and the order in which includes appear.
1072
1073  include_list contains a list of lists of (header, line number) pairs.
1074  It's a list of lists rather than just one flat list to make it
1075  easier to update across preprocessor boundaries.
1076
1077  Call CheckNextIncludeOrder() once for each header in the file, passing
1078  in the type constants defined above. Calls in an illegal order will
1079  return a non-empty error message describing the problem.
1080
1081  """
1082  # self._section will move monotonically through this set. If it ever
1083  # needs to move backwards, CheckNextIncludeOrder will raise an error.
1084  _INITIAL_SECTION = 0
1085  _MY_H_SECTION = 1
1086  _C_SECTION = 2
1087  _CPP_SECTION = 3
1088  _OTHER_SYS_SECTION = 4
1089  _OTHER_H_SECTION = 5
1090
1091  _TYPE_NAMES = {
1092      _C_SYS_HEADER: 'C system header',
1093      _CPP_SYS_HEADER: 'C++ system header',
1094      _OTHER_SYS_HEADER: 'other system header',
1095      _LIKELY_MY_HEADER: 'header this file implements',
1096      _POSSIBLE_MY_HEADER: 'header this file may implement',
1097      _OTHER_HEADER: 'other header',
1098      }
1099  _SECTION_NAMES = {
1100      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
1101      _MY_H_SECTION: 'a header this file implements',
1102      _C_SECTION: 'C system header',
1103      _CPP_SECTION: 'C++ system header',
1104      _OTHER_SYS_SECTION: 'other system header',
1105      _OTHER_H_SECTION: 'other header',
1106      }
1107
1108  def __init__(self):
1109    self.include_list = [[]]
1110    self._section = None
1111    self._last_header = None
1112    self.ResetSection('')
1113
1114  def FindHeader(self, header):
1115    """Check if a header has already been included.
1116
1117    Args:
1118      header: header to check.
1119    Returns:
1120      Line number of previous occurrence, or -1 if the header has not
1121      been seen before.
1122    """
1123    for section_list in self.include_list:
1124      for f in section_list:
1125        if f[0] == header:
1126          return f[1]
1127    return -1
1128
1129  def ResetSection(self, directive):
1130    """Reset section checking for preprocessor directive.
1131
1132    Args:
1133      directive: preprocessor directive (e.g. "if", "else").
1134    """
1135    # The name of the current section.
1136    self._section = self._INITIAL_SECTION
1137    # The path of last found header.
1138    self._last_header = ''
1139
1140    # Update list of includes.  Note that we never pop from the
1141    # include list.
1142    if directive in ('if', 'ifdef', 'ifndef'):
1143      self.include_list.append([])
1144    elif directive in ('else', 'elif'):
1145      self.include_list[-1] = []
1146
1147  def SetLastHeader(self, header_path):
1148    self._last_header = header_path
1149
1150  def CanonicalizeAlphabeticalOrder(self, header_path):
1151    """Returns a path canonicalized for alphabetical comparison.
1152
1153    - replaces "-" with "_" so they both compare the same.
1154    - removes '-inl' since we don't require them to be after the main header.
1155    - lowercase everything, just in case.
1156
1157    Args:
1158      header_path: Path to be canonicalized.
1159
1160    Returns:
1161      Canonicalized path.
1162    """
1163    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
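  # Illustrative example (assumed file name): 'Foo-Bar-inl.h' canonicalizes to
  # 'foo_bar.h', the same result as 'foo_bar.h' itself, so the two spellings
  # compare as equal for alphabetical-order purposes.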
1164
1165  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
1166    """Check if a header is in alphabetical order with the previous header.
1167
1168    Args:
1169      clean_lines: A CleansedLines instance containing the file.
1170      linenum: The number of the line to check.
1171      header_path: Canonicalized header to be checked.
1172
1173    Returns:
1174      Returns true if the header is in alphabetical order.
1175    """
1176    # If previous section is different from current section, _last_header will
1177    # be reset to empty string, so it's always less than current header.
1178    #
1179    # If previous line was a blank line, assume that the headers are
1180    # intentionally sorted the way they are.
1181    if (self._last_header > header_path and
1182        Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
1183      return False
1184    return True
1185
1186  def CheckNextIncludeOrder(self, header_type):
1187    """Returns a non-empty error message if the next header is out of order.
1188
1189    This function also updates the internal state to be ready to check
1190    the next include.
1191
1192    Args:
1193      header_type: One of the _XXX_HEADER constants defined above.
1194
1195    Returns:
1196      The empty string if the header is in the right order, or an
1197      error message describing what's wrong.
1198
1199    """
1200    error_message = ('Found %s after %s' %
1201                     (self._TYPE_NAMES[header_type],
1202                      self._SECTION_NAMES[self._section]))
1203
1204    last_section = self._section
1205
1206    if header_type == _C_SYS_HEADER:
1207      if self._section <= self._C_SECTION:
1208        self._section = self._C_SECTION
1209      else:
1210        self._last_header = ''
1211        return error_message
1212    elif header_type == _CPP_SYS_HEADER:
1213      if self._section <= self._CPP_SECTION:
1214        self._section = self._CPP_SECTION
1215      else:
1216        self._last_header = ''
1217        return error_message
1218    elif header_type == _OTHER_SYS_HEADER:
1219      if self._section <= self._OTHER_SYS_SECTION:
1220        self._section = self._OTHER_SYS_SECTION
1221      else:
1222        self._last_header = ''
1223        return error_message
1224    elif header_type == _LIKELY_MY_HEADER:
1225      if self._section <= self._MY_H_SECTION:
1226        self._section = self._MY_H_SECTION
1227      else:
1228        self._section = self._OTHER_H_SECTION
1229    elif header_type == _POSSIBLE_MY_HEADER:
1230      if self._section <= self._MY_H_SECTION:
1231        self._section = self._MY_H_SECTION
1232      else:
1233        # This will always be the fallback because we're not sure
1234        # enough that the header is associated with this file.
1235        self._section = self._OTHER_H_SECTION
1236    else:
1237      assert header_type == _OTHER_HEADER
1238      self._section = self._OTHER_H_SECTION
1239
1240    if last_section != self._section:
1241      self._last_header = ''
1242
1243    return ''
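  # Sketch of the intended call sequence (illustrative):
  #   state = _IncludeState()
  #   state.CheckNextIncludeOrder(_C_SYS_HEADER)    # -> ''  (accepted)
  #   state.CheckNextIncludeOrder(_CPP_SYS_HEADER)  # -> ''  (accepted)
  #   state.CheckNextIncludeOrder(_C_SYS_HEADER)
  #     # -> 'Found C system header after C++ system header'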
1244
1245
1246class _CppLintState(object):
1247  """Maintains module-wide state."""
1248
1249  def __init__(self):
1250    self.verbose_level = 1  # global setting.
1251    self.error_count = 0    # global count of reported errors
1252    # filters to apply when emitting error messages
1253    self.filters = _DEFAULT_FILTERS[:]
1254    # backup of filter list. Used to restore the state after each file.
1255    self._filters_backup = self.filters[:]
1256    self.counting = 'total'  # In what way are we counting errors?
1257    self.errors_by_category = {}  # string to int dict storing error counts
1258    self.quiet = False  # Suppress non-error messages?
1259
1260    # output format:
1261    # "emacs" - format that emacs can parse (default)
1262    # "eclipse" - format that eclipse can parse
1263    # "vs7" - format that Microsoft Visual Studio 7 can parse
1264    # "junit" - format that Jenkins, Bamboo, etc can parse
1265    # "sed" - returns a gnu sed command to fix the problem
1266    # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
1267    self.output_format = 'emacs'
1268
1269    # For JUnit output, save errors and failures until the end so that they
1270    # can be written into the XML
1271    self._junit_errors = []
1272    self._junit_failures = []
1273
1274  def SetOutputFormat(self, output_format):
1275    """Sets the output format for errors."""
1276    self.output_format = output_format
1277
1278  def SetQuiet(self, quiet):
1279    """Sets the module's quiet settings, and returns the previous setting."""
1280    last_quiet = self.quiet
1281    self.quiet = quiet
1282    return last_quiet
1283
1284  def SetVerboseLevel(self, level):
1285    """Sets the module's verbosity, and returns the previous setting."""
1286    last_verbose_level = self.verbose_level
1287    self.verbose_level = level
1288    return last_verbose_level
1289
1290  def SetCountingStyle(self, counting_style):
1291    """Sets the module's counting options."""
1292    self.counting = counting_style
1293
1294  def SetFilters(self, filters):
1295    """Sets the error-message filters.
1296
1297    These filters are applied when deciding whether to emit a given
1298    error message.
1299
1300    Args:
1301      filters: A string of comma-separated filters (e.g. "+whitespace/indent").
1302               Each filter should start with + or -; else we die.
1303
1304    Raises:
1305      ValueError: The comma-separated filters did not all start with '+' or '-'.
1306                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
1307    """
1308    # Default filters always have less priority than the flag ones.
1309    self.filters = _DEFAULT_FILTERS[:]
1310    self.AddFilters(filters)
1311
1312  def AddFilters(self, filters):
1313    """ Adds more filters to the existing list of error-message filters. """
1314    for filt in filters.split(','):
1315      clean_filt = filt.strip()
1316      if clean_filt:
1317        self.filters.append(clean_filt)
1318    for filt in self.filters:
1319      if not (filt.startswith('+') or filt.startswith('-')):
1320        raise ValueError('Every filter in --filter must start with + or -'
1321                         ' (%s does not)' % filt)
1322
1323  def BackupFilters(self):
1324    """ Saves the current filter list to backup storage."""
1325    self._filters_backup = self.filters[:]
1326
1327  def RestoreFilters(self):
1328    """ Restores filters previously backed up."""
1329    self.filters = self._filters_backup[:]
1330
1331  def ResetErrorCounts(self):
1332    """Sets the module's error statistic back to zero."""
1333    self.error_count = 0
1334    self.errors_by_category = {}
1335
1336  def IncrementErrorCount(self, category):
1337    """Bumps the module's error statistic."""
1338    self.error_count += 1
1339    if self.counting in ('toplevel', 'detailed'):
1340      if self.counting != 'detailed':
1341        category = category.split('/')[0]
1342      if category not in self.errors_by_category:
1343        self.errors_by_category[category] = 0
1344      self.errors_by_category[category] += 1
1345
1346  def PrintErrorCounts(self):
1347    """Print a summary of errors by category, and the total."""
1348    for category, count in sorted(iteritems(self.errors_by_category)):
1349      self.PrintInfo('Category \'%s\' errors found: %d\n' %
1350                       (category, count))
1351    if self.error_count > 0:
1352      self.PrintInfo('Total errors found: %d\n' % self.error_count)
1353
1354  def PrintInfo(self, message):
1355    # _quiet does not represent the --quiet flag.
1356    # Hide info messages from stdout to keep stdout clean for machine consumption.
1357    if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
1358      sys.stdout.write(message)
1359
1360  def PrintError(self, message):
1361    if self.output_format == 'junit':
1362      self._junit_errors.append(message)
1363    else:
1364      sys.stderr.write(message)
1365
1366  def AddJUnitFailure(self, filename, linenum, message, category, confidence):
1367    self._junit_failures.append((filename, linenum, message, category,
1368        confidence))
1369
1370  def FormatJUnitXML(self):
1371    num_errors = len(self._junit_errors)
1372    num_failures = len(self._junit_failures)
1373
1374    testsuite = xml.etree.ElementTree.Element('testsuite')
1375    testsuite.attrib['errors'] = str(num_errors)
1376    testsuite.attrib['failures'] = str(num_failures)
1377    testsuite.attrib['name'] = 'cpplint'
1378
1379    if num_errors == 0 and num_failures == 0:
1380      testsuite.attrib['tests'] = str(1)
1381      xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
1382
1383    else:
1384      testsuite.attrib['tests'] = str(num_errors + num_failures)
1385      if num_errors > 0:
1386        testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
1387        testcase.attrib['name'] = 'errors'
1388        error = xml.etree.ElementTree.SubElement(testcase, 'error')
1389        error.text = '\n'.join(self._junit_errors)
1390      if num_failures > 0:
1391        # Group failures by file
1392        failed_file_order = []
1393        failures_by_file = {}
1394        for failure in self._junit_failures:
1395          failed_file = failure[0]
1396          if failed_file not in failed_file_order:
1397            failed_file_order.append(failed_file)
1398            failures_by_file[failed_file] = []
1399          failures_by_file[failed_file].append(failure)
1400        # Create a testcase for each file
1401        for failed_file in failed_file_order:
1402          failures = failures_by_file[failed_file]
1403          testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
1404          testcase.attrib['name'] = failed_file
1405          failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
1406          template = '{0}: {1} [{2}] [{3}]'
1407          texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
1408          failure.text = '\n'.join(texts)
1409
1410    xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
1411    return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
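  # Shape of the generated document when there are no errors or failures
  # (illustrative; the real output is a single unindented line):
  #   <?xml version="1.0" encoding="UTF-8" ?>
  #   <testsuite errors="0" failures="0" name="cpplint" tests="1">
  #     <testcase name="passed" />
  #   </testsuite>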
1412
1413
1414_cpplint_state = _CppLintState()
1415
1416
1417def _OutputFormat():
1418  """Gets the module's output format."""
1419  return _cpplint_state.output_format
1420
1421
1422def _SetOutputFormat(output_format):
1423  """Sets the module's output format."""
1424  _cpplint_state.SetOutputFormat(output_format)
1425
1426def _Quiet():
1427  """Returns the module's quiet setting."""
1428  return _cpplint_state.quiet
1429
1430def _SetQuiet(quiet):
1431  """Set the module's quiet status, and return previous setting."""
1432  return _cpplint_state.SetQuiet(quiet)
1433
1434
1435def _VerboseLevel():
1436  """Returns the module's verbosity setting."""
1437  return _cpplint_state.verbose_level
1438
1439
1440def _SetVerboseLevel(level):
1441  """Sets the module's verbosity, and returns the previous setting."""
1442  return _cpplint_state.SetVerboseLevel(level)
1443
1444
1445def _SetCountingStyle(level):
1446  """Sets the module's counting options."""
1447  _cpplint_state.SetCountingStyle(level)
1448
1449
1450def _Filters():
1451  """Returns the module's list of output filters, as a list."""
1452  return _cpplint_state.filters
1453
1454
1455def _SetFilters(filters):
1456  """Sets the module's error-message filters.
1457
1458  These filters are applied when deciding whether to emit a given
1459  error message.
1460
1461  Args:
1462    filters: A string of comma-separated filters (e.g. "whitespace/indent").
1463             Each filter should start with + or -; else we die.
1464  """
1465  _cpplint_state.SetFilters(filters)
1466
1467def _AddFilters(filters):
1468  """Adds more filter overrides.
1469
1470  Unlike _SetFilters, this function does not reset the current list of filters
1471  available.
1472
1473  Args:
1474    filters: A string of comma-separated filters (e.g. "whitespace/indent").
1475             Each filter should start with + or -; else we die.
1476  """
1477  _cpplint_state.AddFilters(filters)
1478
1479def _BackupFilters():
1480  """ Saves the current filter list to backup storage."""
1481  _cpplint_state.BackupFilters()
1482
1483def _RestoreFilters():
1484  """ Restores filters previously backed up."""
1485  _cpplint_state.RestoreFilters()
1486
1487class _FunctionState(object):
1488  """Tracks current function name and the number of lines in its body."""
1489
1490  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
1491  _TEST_TRIGGER = 400    # about 60% more than _NORMAL_TRIGGER.
1492
1493  def __init__(self):
1494    self.in_a_function = False
1495    self.lines_in_function = 0
1496    self.current_function = ''
1497
1498  def Begin(self, function_name):
1499    """Start analyzing function body.
1500
1501    Args:
1502      function_name: The name of the function being tracked.
1503    """
1504    self.in_a_function = True
1505    self.lines_in_function = 0
1506    self.current_function = function_name
1507
1508  def Count(self):
1509    """Count line in current function body."""
1510    if self.in_a_function:
1511      self.lines_in_function += 1
1512
1513  def Check(self, error, filename, linenum):
1514    """Report if too many lines in function body.
1515
1516    Args:
1517      error: The function to call with any errors found.
1518      filename: The name of the current file.
1519      linenum: The number of the line to check.
1520    """
1521    if not self.in_a_function:
1522      return
1523
1524    if Match(r'T(EST|est)', self.current_function):
1525      base_trigger = self._TEST_TRIGGER
1526    else:
1527      base_trigger = self._NORMAL_TRIGGER
1528    trigger = base_trigger * 2**_VerboseLevel()
1529
1530    if self.lines_in_function > trigger:
1531      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
1532      # e.g. with base_trigger 250: 500 lines => 1, 1000 => 2, 2000 => 3, ...
1533      if error_level > 5:
1534        error_level = 5
1535      error(filename, linenum, 'readability/fn_size', error_level,
1536            'Small and focused functions are preferred:'
1537            ' %s has %d non-comment lines'
1538            ' (error triggered by exceeding %d lines).'  % (
1539                self.current_function, self.lines_in_function, trigger))
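  # Worked example of the trigger math above (assumed sizes): at --v=0 a
  # non-test function uses base_trigger = 250, so trigger = 250 * 2**0 = 250;
  # a 1,000-line body then reports error_level = int(log2(1000 / 250)) = 2.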
1540
1541  def End(self):
1542    """Stop analyzing function body."""
1543    self.in_a_function = False
1544
1545
1546class _IncludeError(Exception):
1547  """Indicates a problem with the include order in a file."""
1548  pass
1549
1550
1551class FileInfo(object):
1552  """Provides utility functions for filenames.
1553
1554  FileInfo provides easy access to the components of a file's path
1555  relative to the project root.
1556  """
1557
1558  def __init__(self, filename):
1559    self._filename = filename
1560
1561  def FullName(self):
1562    """Make Windows paths like Unix."""
1563    return os.path.abspath(self._filename).replace('\\', '/')
1564
1565  def RepositoryName(self):
1566    r"""FullName after removing the local path to the repository.
1567
1568    If we have a real absolute path name here we can try to do something smart:
1569    detecting the root of the checkout and truncating /path/to/checkout from
1570    the name so that we get header guards that don't include things like
1571    "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
1572    people on different computers who have checked the source out to different
1573    locations won't see bogus errors.
1574    """
1575    fullname = self.FullName()
1576
1577    if os.path.exists(fullname):
1578      project_dir = os.path.dirname(fullname)
1579
1580      # If the user specified a repository path, it exists, and the file is
1581      # contained in it, use the specified repository path
1582      if _repository:
1583        repo = FileInfo(_repository).FullName()
1584        root_dir = project_dir
1585        while os.path.exists(root_dir):
1586          # allow case insensitive compare on Windows
1587          if os.path.normcase(root_dir) == os.path.normcase(repo):
1588            return os.path.relpath(fullname, root_dir).replace('\\', '/')
1589          one_up_dir = os.path.dirname(root_dir)
1590          if one_up_dir == root_dir:
1591            break
1592          root_dir = one_up_dir
1593
1594      if os.path.exists(os.path.join(project_dir, ".svn")):
1595        # If there's a .svn file in the current directory, we recursively look
1596        # up the directory tree for the top of the SVN checkout
1597        root_dir = project_dir
1598        one_up_dir = os.path.dirname(root_dir)
1599        while os.path.exists(os.path.join(one_up_dir, ".svn")):
1600          root_dir = os.path.dirname(root_dir)
1601          one_up_dir = os.path.dirname(one_up_dir)
1602
1603        prefix = os.path.commonprefix([root_dir, project_dir])
1604        return fullname[len(prefix) + 1:]
1605
1606      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
1607      # searching up from the current path.
1608      root_dir = current_dir = os.path.dirname(fullname)
1609      while current_dir != os.path.dirname(current_dir):
1610        if (os.path.exists(os.path.join(current_dir, ".git")) or
1611            os.path.exists(os.path.join(current_dir, ".hg")) or
1612            os.path.exists(os.path.join(current_dir, ".svn"))):
1613          root_dir = current_dir
1614        current_dir = os.path.dirname(current_dir)
1615
1616      if (os.path.exists(os.path.join(root_dir, ".git")) or
1617          os.path.exists(os.path.join(root_dir, ".hg")) or
1618          os.path.exists(os.path.join(root_dir, ".svn"))):
1619        prefix = os.path.commonprefix([root_dir, project_dir])
1620        return fullname[len(prefix) + 1:]
1621
1622    # Don't know what to do; header guard warnings may be wrong...
1623    return fullname
1624
1625  def Split(self):
1626    """Splits the file into the directory, basename, and extension.
1627
1628    For 'chrome/browser/browser.cc', Split() would
1629    return ('chrome/browser', 'browser', '.cc')
1630
1631    Returns:
1632      A tuple of (directory, basename, extension).
1633    """
1634
1635    googlename = self.RepositoryName()
1636    project, rest = os.path.split(googlename)
1637    return (project,) + os.path.splitext(rest)
1638
1639  def BaseName(self):
1640    """File base name - text after the final slash, before the final period."""
1641    return self.Split()[1]
1642
1643  def Extension(self):
1644    """File extension - text following the final period, includes that period."""
1645    return self.Split()[2]
1646
1647  def NoExtension(self):
1648    """File has no source file extension."""
1649    return '/'.join(self.Split()[0:2])
1650
1651  def IsSource(self):
1652    """File has a source file extension."""
1653    return _IsSourceExtension(self.Extension()[1:])
1654
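# Illustrative sketch (not executed): for a FileInfo instance `info` (a
# hypothetical variable) whose RepositoryName() resolves to
# 'chrome/browser/browser.cc', the accessors above behave roughly like this:
#   info.Split()       -> ('chrome/browser', 'browser', '.cc')
#   info.BaseName()    -> 'browser'
#   info.Extension()   -> '.cc'
#   info.NoExtension() -> 'chrome/browser/browser'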
1655
1656def _ShouldPrintError(category, confidence, linenum):
1657  """If confidence >= verbose, category passes filter and is not suppressed."""
1658
1659  # There are three ways we might decide not to print an error message:
1660  # a "NOLINT(category)" comment appears in the source,
1661  # the verbosity level isn't high enough, or the filters filter it out.
1662  if IsErrorSuppressedByNolint(category, linenum):
1663    return False
1664
1665  if confidence < _cpplint_state.verbose_level:
1666    return False
1667
1668  is_filtered = False
1669  for one_filter in _Filters():
1670    if one_filter.startswith('-'):
1671      if category.startswith(one_filter[1:]):
1672        is_filtered = True
1673    elif one_filter.startswith('+'):
1674      if category.startswith(one_filter[1:]):
1675        is_filtered = False
1676    else:
1677      assert False  # should have been checked for in SetFilter.
1678  if is_filtered:
1679    return False
1680
1681  return True
1682
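# Filter sketch (illustrative only): with _Filters() returning
# ['-whitespace', '+whitespace/braces'], filters are applied in order, so:
#   'whitespace/indent' -> filtered out (matches '-whitespace' only)
#   'whitespace/braces' -> printed (re-enabled by the '+' filter)
#   'runtime/int'       -> printed (matches no filter)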
1683
1684def Error(filename, linenum, category, confidence, message):
1685  """Logs the fact we've found a lint error.
1686
1687  We log where the error was found, and also our confidence in the error,
1688  that is, how certain we are this is a legitimate style regression, and
1689  not a misidentification or a use that's sometimes justified.
1690
1691  False positives can be suppressed by the use of
1692  "cpplint(category)"  comments on the offending line.  These are
1693  parsed into _error_suppressions.
1694
1695  Args:
1696    filename: The name of the file containing the error.
1697    linenum: The number of the line containing the error.
1698    category: A string used to describe the "category" this bug
1699      falls under: "whitespace", say, or "runtime".  Categories
1700      may have a hierarchy separated by slashes: "whitespace/indent".
1701    confidence: A number from 1-5 representing a confidence score for
1702      the error, with 5 meaning that we are certain of the problem,
1703      and 1 meaning that it could be a legitimate construct.
1704    message: The error message.
1705  """
1706  if _ShouldPrintError(category, confidence, linenum):
1707    _cpplint_state.IncrementErrorCount(category)
1708    if _cpplint_state.output_format == 'vs7':
1709      _cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
1710          filename, linenum, category, message, confidence))
1711    elif _cpplint_state.output_format == 'eclipse':
1712      sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % (
1713          filename, linenum, message, category, confidence))
1714    elif _cpplint_state.output_format == 'junit':
1715      _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
1716          confidence)
1717    elif _cpplint_state.output_format in ['sed', 'gsed']:
1718      if message in _SED_FIXUPS:
1719        sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s  [%s] [%d]\n" % (
1720            linenum, _SED_FIXUPS[message], filename, message, category, confidence))
1721      else:
1722        sys.stderr.write('# %s:%s:  "%s"  [%s] [%d]\n' % (
1723            filename, linenum, message, category, confidence))
1724    else:
1725      final_message = '%s:%s:  %s  [%s] [%d]\n' % (
1726          filename, linenum, message, category, confidence)
1727      sys.stderr.write(final_message)
1728
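# Output sketch for the default (emacs-style) format handled by the final
# branch above; the message and values are made up for illustration:
#   foo.cc:42:  Missing space before {  [whitespace/braces] [5]
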
1729# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
1730_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
1731    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
1732# Match a single C style comment on the same line.
1733_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
1734# Matches multi-line C style comments.
1735# This RE is a little bit more complicated than one might expect, because we
1736# have to take care of tools that remove spaces, so we can handle comments
1737# inside statements better.
1738# The current rule is: we only clear spaces from both sides when we're at the
1739# end of the line.  Otherwise, we try to remove spaces from the right side;
1740# if that doesn't work, we try the left side, but only if there's a non-word
1741# character on the right.
1742_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
1743    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
1744    _RE_PATTERN_C_COMMENTS + r'\s+|' +
1745    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
1746    _RE_PATTERN_C_COMMENTS + r')')
1747
1748
1749def IsCppString(line):
1750  """Does line terminate so, that the next symbol is in string constant.
1751
1752  This function does not consider single-line nor multi-line comments.
1753
1754  Args:
1755    line: is a partial line of code starting from the 0..n.
1756
1757  Returns:
1758    True, if next character appended to 'line' is inside a
1759    string constant.
1760  """
1761
1762  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
1763  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
1764
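# Rough examples of the quote-parity check above (illustrative, not executed):
#   IsCppString('std::string s = "abc')    -> True   (one unmatched '"')
#   IsCppString('std::string s = "abc";')  -> False  (quotes are balanced)
#   IsCppString('char c = \'"\';')         -> False  ('"' in a char literal)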
1765
1766def CleanseRawStrings(raw_lines):
1767  """Removes C++11 raw strings from lines.
1768
1769    Before:
1770      static const char kData[] = R"(
1771          multi-line string
1772          )";
1773
1774    After:
1775      static const char kData[] = ""
1776          (replaced by blank line)
1777          "";
1778
1779  Args:
1780    raw_lines: list of raw lines.
1781
1782  Returns:
1783    list of lines with C++11 raw strings replaced by empty strings.
1784  """
1785
1786  delimiter = None
1787  lines_without_raw_strings = []
1788  for line in raw_lines:
1789    if delimiter:
1790      # Inside a raw string, look for the end
1791      end = line.find(delimiter)
1792      if end >= 0:
1793        # Found the end of the string, match leading space for this
1794        # line and resume copying the original lines, and also insert
1795        # a "" on the last line.
1796        leading_space = Match(r'^(\s*)\S', line)
1797        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
1798        delimiter = None
1799      else:
1800        # Haven't found the end yet, append a blank line.
1801        line = '""'
1802
1803    # Look for beginning of a raw string, and replace them with
1804    # empty strings.  This is done in a loop to handle multiple raw
1805    # strings on the same line.
1806    while delimiter is None:
1807      # Look for beginning of a raw string.
1808      # See 2.14.15 [lex.string] for syntax.
1809      #
1810      # Once we have matched a raw string, we check the prefix of the
1811      # line to make sure that the line is not part of a single line
1812      # comment.  It's done this way because we remove raw strings
1813      # before removing comments as opposed to removing comments
1814      # before removing raw strings.  This is because there are some
1815      # cpplint checks that require the comments to be preserved, but
1816      # we don't want to check comments that are inside raw strings.
1817      matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
1818      if (matched and
1819          not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
1820                    matched.group(1))):
1821        delimiter = ')' + matched.group(2) + '"'
1822
1823        end = matched.group(3).find(delimiter)
1824        if end >= 0:
1825          # Raw string ended on same line
1826          line = (matched.group(1) + '""' +
1827                  matched.group(3)[end + len(delimiter):])
1828          delimiter = None
1829        else:
1830          # Start of a multi-line raw string
1831          line = matched.group(1) + '""'
1832      else:
1833        break
1834
1835    lines_without_raw_strings.append(line)
1836
1837  # TODO(unknown): if delimiter is not None here, we might want to
1838  # emit a warning for unterminated string.
1839  return lines_without_raw_strings
1840
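# Same-line case sketch (illustrative): a raw string that opens and closes on
# one line is collapsed in place, e.g.
#   'auto s = R"x(hi)x"; f(s);'  ->  'auto s = ""; f(s);'
# The multi-line case behaves as in the docstring's Before/After example.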
1841
1842def FindNextMultiLineCommentStart(lines, lineix):
1843  """Find the beginning marker for a multiline comment."""
1844  while lineix < len(lines):
1845    if lines[lineix].strip().startswith('/*'):
1846      # Only return this marker if the comment goes beyond this line
1847      if lines[lineix].strip().find('*/', 2) < 0:
1848        return lineix
1849    lineix += 1
1850  return len(lines)
1851
1852
1853def FindNextMultiLineCommentEnd(lines, lineix):
1854  """We are inside a comment, find the end marker."""
1855  while lineix < len(lines):
1856    if lines[lineix].strip().endswith('*/'):
1857      return lineix
1858    lineix += 1
1859  return len(lines)
1860
1861
1862def RemoveMultiLineCommentsFromRange(lines, begin, end):
1863  """Clears a range of lines for multi-line comments."""
1864  # Replacing the lines with '/**/' comments keeps them non-empty, so we will
1865  # not get unnecessary blank line warnings later in the code.
1866  for i in range(begin, end):
1867    lines[i] = '/**/'
1868
1869
1870def RemoveMultiLineComments(filename, lines, error):
1871  """Removes multiline (c-style) comments from lines."""
1872  lineix = 0
1873  while lineix < len(lines):
1874    lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
1875    if lineix_begin >= len(lines):
1876      return
1877    lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
1878    if lineix_end >= len(lines):
1879      error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
1880            'Could not find end of multi-line comment')
1881      return
1882    RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
1883    lineix = lineix_end + 1
1884
1885
1886def CleanseComments(line):
1887  """Removes //-comments and single-line C-style /* */ comments.
1888
1889  Args:
1890    line: A line of C++ source.
1891
1892  Returns:
1893    The line with single-line comments removed.
1894  """
1895  commentpos = line.find('//')
1896  if commentpos != -1 and not IsCppString(line[:commentpos]):
1897    line = line[:commentpos].rstrip()
1898  # get rid of /* ... */
1899  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
1900
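# Illustrative behavior of CleanseComments (not executed):
#   'int x = 1;  // set x'  ->  'int x = 1;'
#   'f(/* arg */ 1)'        ->  'f(1)'
# Comments spanning several lines are handled by RemoveMultiLineComments above.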
1901
1902class CleansedLines(object):
1903  """Holds 4 copies of all lines with different preprocessing applied to them.
1904
1905  1) elided member contains lines without strings and comments.
1906  2) lines member contains lines without comments.
1907  3) raw_lines member contains all the lines without processing.
1908  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
1909     strings removed.
1910  All these members are of <type 'list'>, and of the same length.
1911  """
1912
1913  def __init__(self, lines):
1914    self.elided = []
1915    self.lines = []
1916    self.raw_lines = lines
1917    self.num_lines = len(lines)
1918    self.lines_without_raw_strings = CleanseRawStrings(lines)
1919    for linenum in range(len(self.lines_without_raw_strings)):
1920      self.lines.append(CleanseComments(
1921          self.lines_without_raw_strings[linenum]))
1922      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
1923      self.elided.append(CleanseComments(elided))
1924
1925  def NumLines(self):
1926    """Returns the number of lines represented."""
1927    return self.num_lines
1928
1929  @staticmethod
1930  def _CollapseStrings(elided):
1931    """Collapses strings and chars on a line to simple "" or '' blocks.
1932
1933    We nix strings first so we're not fooled by text like '"http://"'
1934
1935    Args:
1936      elided: The line being processed.
1937
1938    Returns:
1939      The line with collapsed strings.
1940    """
1941    if _RE_PATTERN_INCLUDE.match(elided):
1942      return elided
1943
1944    # Remove escaped characters first to make quote/single quote collapsing
1945    # basic.  Things that look like escaped characters shouldn't occur
1946    # outside of strings and chars.
1947    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
1948
1949    # Replace quoted strings and digit separators.  Both single quotes
1950    # and double quotes are processed in the same loop, otherwise
1951    # nested quotes wouldn't work.
1952    collapsed = ''
1953    while True:
1954      # Find the first quote character
1955      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
1956      if not match:
1957        collapsed += elided
1958        break
1959      head, quote, tail = match.groups()
1960
1961      if quote == '"':
1962        # Collapse double quoted strings
1963        second_quote = tail.find('"')
1964        if second_quote >= 0:
1965          collapsed += head + '""'
1966          elided = tail[second_quote + 1:]
1967        else:
1968          # Unmatched double quote, don't bother processing the rest
1969          # of the line since this is probably a multiline string.
1970          collapsed += elided
1971          break
1972      else:
1973        # Found single quote, check nearby text to eliminate digit separators.
1974        #
1975        # There is no special handling for floating point here, because
1976        # the integer/fractional/exponent parts would all be parsed
1977        # correctly as long as there are digits on both sides of the
1978        # separator.  So we are fine as long as we don't see something
1979        # like "0.'3" (gcc 4.9.0 will not allow this literal).
1980        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
1981          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
1982          collapsed += head + match_literal.group(1).replace("'", '')
1983          elided = match_literal.group(2)
1984        else:
1985          second_quote = tail.find('\'')
1986          if second_quote >= 0:
1987            collapsed += head + "''"
1988            elided = tail[second_quote + 1:]
1989          else:
1990            # Unmatched single quote
1991            collapsed += elided
1992            break
1993
1994    return collapsed
1995
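# Illustrative results of CleansedLines._CollapseStrings (not executed):
#   'f("http://x", \'y\')'  ->  'f("", \'\')'       (string/char contents gone)
#   "int x = 1'000'000;"    ->  'int x = 1000000;'  (digit separators removed)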
1996
1997def FindEndOfExpressionInLine(line, startpos, stack):
1998  """Find the position just after the end of current parenthesized expression.
1999
2000  Args:
2001    line: a CleansedLines line.
2002    startpos: start searching at this position.
2003    stack: nesting stack at startpos.
2004
2005  Returns:
2006    On finding matching end: (index just after matching end, None)
2007    On finding an unclosed expression: (-1, None)
2008    Otherwise: (-1, new stack at end of this line)
2009  """
2010  for i in xrange(startpos, len(line)):
2011    char = line[i]
2012    if char in '([{':
2013      # Found start of parenthesized expression, push to expression stack
2014      stack.append(char)
2015    elif char == '<':
2016      # Found potential start of template argument list
2017      if i > 0 and line[i - 1] == '<':
2018        # Left shift operator
2019        if stack and stack[-1] == '<':
2020          stack.pop()
2021          if not stack:
2022            return (-1, None)
2023      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
2024        # operator<, don't add to stack
2025        continue
2026      else:
2027        # Tentative start of template argument list
2028        stack.append('<')
2029    elif char in ')]}':
2030      # Found end of parenthesized expression.
2031      #
2032      # If we are currently expecting a matching '>', the pending '<'
2033      # must have been an operator.  Remove them from expression stack.
2034      while stack and stack[-1] == '<':
2035        stack.pop()
2036      if not stack:
2037        return (-1, None)
2038      if ((stack[-1] == '(' and char == ')') or
2039          (stack[-1] == '[' and char == ']') or
2040          (stack[-1] == '{' and char == '}')):
2041        stack.pop()
2042        if not stack:
2043          return (i + 1, None)
2044      else:
2045        # Mismatched parentheses
2046        return (-1, None)
2047    elif char == '>':
2048      # Found potential end of template argument list.
2049
2050      # Ignore "->" and operator functions
2051      if (i > 0 and
2052          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
2053        continue
2054
2055      # Pop the stack if there is a matching '<'.  Otherwise, ignore
2056      # this '>' since it must be an operator.
2057      if stack:
2058        if stack[-1] == '<':
2059          stack.pop()
2060          if not stack:
2061            return (i + 1, None)
2062    elif char == ';':
2063      # Found something that looks like the end of a statement.  If we are
2064      # currently expecting a '>', the matching '<' must have been an operator,
2065      # since template argument lists should not contain statements.
2066      while stack and stack[-1] == '<':
2067        stack.pop()
2068      if not stack:
2069        return (-1, None)
2070
2071  # Did not find end of expression or unbalanced parentheses on this line
2072  return (-1, stack)
2073
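# Return-value sketch for FindEndOfExpressionInLine (illustrative):
#   FindEndOfExpressionInLine('if (a && b) {', 3, [])  -> (11, None)   past ')'
#   FindEndOfExpressionInLine('set<int> x;', 3, [])    -> (8, None)    past '>'
#   FindEndOfExpressionInLine('foo(bar,', 3, [])       -> (-1, ['('])  continues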
2074
2075def CloseExpression(clean_lines, linenum, pos):
2076  """If input points to ( or { or [ or <, finds the position that closes it.
2077
2078  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
2079  linenum/pos that correspond to the closing of the expression.
2080
2081  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
2082  Ideally we would want to index all opening and closing parentheses once
2083  and have CloseExpression be just a simple lookup, but due to preprocessor
2084  tricks, this is not so easy.
2085
2086  Args:
2087    clean_lines: A CleansedLines instance containing the file.
2088    linenum: The number of the line to check.
2089    pos: A position on the line.
2090
2091  Returns:
2092    A tuple (line, linenum, pos) pointer *past* the closing brace, or
2093    (line, len(lines), -1) if we never find a close.  Note we ignore
2094    strings and comments when matching; and the line we return is the
2095    'cleansed' line at linenum.
2096  """
2097
2098  line = clean_lines.elided[linenum]
2099  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
2100    return (line, clean_lines.NumLines(), -1)
2101
2102  # Check first line
2103  (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
2104  if end_pos > -1:
2105    return (line, linenum, end_pos)
2106
2107  # Continue scanning forward
2108  while stack and linenum < clean_lines.NumLines() - 1:
2109    linenum += 1
2110    line = clean_lines.elided[linenum]
2111    (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
2112    if end_pos > -1:
2113      return (line, linenum, end_pos)
2114
2115  # Did not find end of expression before end of file, give up
2116  return (line, clean_lines.NumLines(), -1)
2117
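# Usage sketch for CloseExpression (illustrative): if clean_lines.elided[n] is
# 'while (x < limit) {' and pos 6 points at the '(', then
#   CloseExpression(clean_lines, n, 6)
# returns (line, n, 17), the position just past the matching ')'; for
# expressions spanning lines, the returned linenum is the later line.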
2118
2119def FindStartOfExpressionInLine(line, endpos, stack):
2120  """Find position at the matching start of current expression.
2121
2122  This is almost the reverse of FindEndOfExpressionInLine, but note
2123  that the input position and returned position differ by 1.
2124
2125  Args:
2126    line: a CleansedLines line.
2127    endpos: start searching at this position.
2128    stack: nesting stack at endpos.
2129
2130  Returns:
2131    On finding matching start: (index at matching start, None)
2132    On finding an unclosed expression: (-1, None)
2133    Otherwise: (-1, new stack at beginning of this line)
2134  """
2135  i = endpos
2136  while i >= 0:
2137    char = line[i]
2138    if char in ')]}':
2139      # Found end of expression, push to expression stack
2140      stack.append(char)
2141    elif char == '>':
2142      # Found potential end of template argument list.
2143      #
2144      # Ignore it if it's a "->" or ">=" or "operator>"
2145      if (i > 0 and
2146          (line[i - 1] == '-' or
2147           Match(r'\s>=\s', line[i - 1:]) or
2148           Search(r'\boperator\s*$', line[0:i]))):
2149        i -= 1
2150      else:
2151        stack.append('>')
2152    elif char == '<':
2153      # Found potential start of template argument list
2154      if i > 0 and line[i - 1] == '<':
2155        # Left shift operator
2156        i -= 1
2157      else:
2158        # If there is a matching '>', we can pop the expression stack.
2159        # Otherwise, ignore this '<' since it must be an operator.
2160        if stack and stack[-1] == '>':
2161          stack.pop()
2162          if not stack:
2163            return (i, None)
2164    elif char in '([{':
2165      # Found start of expression.
2166      #
2167      # If there are any unmatched '>' on the stack, they must be
2168      # operators.  Remove those.
2169      while stack and stack[-1] == '>':
2170        stack.pop()
2171      if not stack:
2172        return (-1, None)
2173      if ((char == '(' and stack[-1] == ')') or
2174          (char == '[' and stack[-1] == ']') or
2175          (char == '{' and stack[-1] == '}')):
2176        stack.pop()
2177        if not stack:
2178          return (i, None)
2179      else:
2180        # Mismatched parentheses
2181        return (-1, None)
2182    elif char == ';':
2183      # Found something that look like end of statements.  If we are currently
2184      # expecting a '<', the matching '>' must have been an operator, since
2185      # template argument list should not contain statements.
2186      while stack and stack[-1] == '>':
2187        stack.pop()
2188      if not stack:
2189        return (-1, None)
2190
2191    i -= 1
2192
2193  return (-1, stack)
2194
2195
2196def ReverseCloseExpression(clean_lines, linenum, pos):
2197  """If input points to ) or } or ] or >, finds the position that opens it.
2198
2199  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
2200  linenum/pos that correspond to the opening of the expression.
2201
2202  Args:
2203    clean_lines: A CleansedLines instance containing the file.
2204    linenum: The number of the line to check.
2205    pos: A position on the line.
2206
2207  Returns:
2208    A tuple (line, linenum, pos) pointer *at* the opening brace, or
2209    (line, 0, -1) if we never find the matching opening brace.  Note
2210    we ignore strings and comments when matching; and the line we
2211    return is the 'cleansed' line at linenum.
2212  """
2213  line = clean_lines.elided[linenum]
2214  if line[pos] not in ')}]>':
2215    return (line, 0, -1)
2216
2217  # Check last line
2218  (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
2219  if start_pos > -1:
2220    return (line, linenum, start_pos)
2221
2222  # Continue scanning backward
2223  while stack and linenum > 0:
2224    linenum -= 1
2225    line = clean_lines.elided[linenum]
2226    (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
2227    if start_pos > -1:
2228      return (line, linenum, start_pos)
2229
2230  # Did not find start of expression before beginning of file, give up
2231  return (line, 0, -1)
2232
2233
2234def CheckForCopyright(filename, lines, error):
2235  """Logs an error if no Copyright message appears at the top of the file."""
2236
2237  # We'll say it should occur by line 10. Don't forget there's a
2238  # placeholder line at the front.
2239  for line in xrange(1, min(len(lines), 11)):
2240    if re.search(r'Copyright', lines[line], re.I): break
2241  else:                       # means no copyright line was found
2242    error(filename, 0, 'legal/copyright', 5,
2243          'No copyright message found.  '
2244          'You should have a line: "Copyright [year] <Copyright Owner>"')
2245
2246
2247def GetIndentLevel(line):
2248  """Return the number of leading spaces in line.
2249
2250  Args:
2251    line: A string to check.
2252
2253  Returns:
2254    An integer count of leading spaces, possibly zero.
2255  """
2256  indent = Match(r'^( *)\S', line)
2257  if indent:
2258    return len(indent.group(1))
2259  else:
2260    return 0
2261
2262def PathSplitToList(path):
2263  """Returns the path split into a list by the separator.
2264
2265  Args:
2266    path: An absolute or relative path (e.g. '/a/b/c/' or '../a')
2267
2268  Returns:
2269    A list of path components (e.g. ['a', 'b', 'c']).
2270  """
2271  lst = []
2272  while True:
2273    (head, tail) = os.path.split(path)
2274    if head == path:  # absolute paths end
2275      lst.append(head)
2276      break
2277    if tail == path:  # relative paths end
2278      lst.append(tail)
2279      break
2280
2281    path = head
2282    lst.append(tail)
2283
2284  lst.reverse()
2285  return lst
2286
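# Illustrative results on POSIX-style paths (not executed; os.path.split is
# platform dependent):
#   PathSplitToList('a/b/c')  -> ['a', 'b', 'c']
#   PathSplitToList('/a/b')   -> ['/', 'a', 'b']
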
2287def GetHeaderGuardCPPVariable(filename):
2288  """Returns the CPP variable that should be used as a header guard.
2289
2290  Args:
2291    filename: The name of a C++ header file.
2292
2293  Returns:
2294    The CPP variable that should be used as a header guard in the
2295    named file.
2296
2297  """
2298
2299  # Restores the original filename in case cpplint is invoked from Emacs's
2300  # flymake.
2301  filename = re.sub(r'_flymake\.h$', '.h', filename)
2302  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
2303  # Replace 'c++' with 'cpp'.
2304  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
2305
2306  fileinfo = FileInfo(filename)
2307  file_path_from_root = fileinfo.RepositoryName()
2308
2309  def FixupPathFromRoot():
2310    if _root_debug:
2311      sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
2312          % (_root, fileinfo.RepositoryName()))
2313
2314    # Process the file path with the --root flag if it was set.
2315    if not _root:
2316      if _root_debug:
2317        sys.stderr.write("_root unspecified\n")
2318      return file_path_from_root
2319
2320    def StripListPrefix(lst, prefix):
2321      # f(['x', 'y'], ['w', 'z']) -> None  (not a valid prefix)
2322      if lst[:len(prefix)] != prefix:
2323        return None
2324      # f(['a', 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
2325      return lst[(len(prefix)):]
2326
2327    # root behavior:
2328    #   --root=subdir , lstrips subdir from the header guard
2329    maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
2330                                 PathSplitToList(_root))
2331
2332    if _root_debug:
2333      sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
2334          " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
2335
2336    if maybe_path:
2337      return os.path.join(*maybe_path)
2338
2339    #   --root=.. , will prepend the outer directory to the header guard
2340    full_path = fileinfo.FullName()
2341    # adapt slashes for windows
2342    root_abspath = os.path.abspath(_root).replace('\\', '/')
2343
2344    maybe_path = StripListPrefix(PathSplitToList(full_path),
2345                                 PathSplitToList(root_abspath))
2346
2347    if _root_debug:
2348      sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
2349          "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
2350
2351    if maybe_path:
2352      return os.path.join(*maybe_path)
2353
2354    if _root_debug:
2355      sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
2356
2357    #   --root=FAKE_DIR is ignored
2358    return file_path_from_root
2359
2360  file_path_from_root = FixupPathFromRoot()
2361  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
2362
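# Example (illustrative): for a header whose path from the repository root is
# 'chrome/browser/browser.h', with no --root flag, the suggested guard is
#   CHROME_BROWSER_BROWSER_H_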
2363
2364def CheckForHeaderGuard(filename, clean_lines, error):
2365  """Checks that the file contains a header guard.
2366
2367  Logs an error if no #ifndef header guard is present.  For other
2368  headers, checks that the full pathname is used.
2369
2370  Args:
2371    filename: The name of the C++ header file.
2372    clean_lines: A CleansedLines instance containing the file.
2373    error: The function to call with any errors found.
2374  """
2375
2376  # Don't check for header guards if there are error suppression
2377  # comments somewhere in this file.
2378  #
2379  # Because this is silencing a warning for a nonexistent line, we
2380  # only support the very specific NOLINT(build/header_guard) syntax,
2381  # and not the general NOLINT or NOLINT(*) syntax.
2382  raw_lines = clean_lines.lines_without_raw_strings
2383  for i in raw_lines:
2384    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
2385      return
2386
2387  # Allow pragma once instead of header guards
2388  for i in raw_lines:
2389    if Search(r'^\s*#pragma\s+once', i):
2390      return
2391
2392  cppvar = GetHeaderGuardCPPVariable(filename)
2393
2394  ifndef = ''
2395  ifndef_linenum = 0
2396  define = ''
2397  endif = ''
2398  endif_linenum = 0
2399  for linenum, line in enumerate(raw_lines):
2400    linesplit = line.split()
2401    if len(linesplit) >= 2:
2402      # find the first occurrence of #ifndef and #define, save arg
2403      if not ifndef and linesplit[0] == '#ifndef':
2404        # set ifndef to the header guard presented on the #ifndef line.
2405        ifndef = linesplit[1]
2406        ifndef_linenum = linenum
2407      if not define and linesplit[0] == '#define':
2408        define = linesplit[1]
2409    # find the last occurrence of #endif, save entire line
2410    if line.startswith('#endif'):
2411      endif = line
2412      endif_linenum = linenum
2413
2414  if not ifndef or not define or ifndef != define:
2415    error(filename, 0, 'build/header_guard', 5,
2416          'No #ifndef header guard found, suggested CPP variable is: %s' %
2417          cppvar)
2418    return
2419
2420  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
2421  # for backward compatibility.
2422  if ifndef != cppvar:
2423    error_level = 0
2424    if ifndef != cppvar + '_':
2425      error_level = 5
2426
2427    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
2428                            error)
2429    error(filename, ifndef_linenum, 'build/header_guard', error_level,
2430          '#ifndef header guard has wrong style, please use: %s' % cppvar)
2431
2432  # Check for "//" comments on endif line.
2433  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
2434                          error)
2435  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
2436  if match:
2437    if match.group(1) == '_':
2438      # Issue low severity warning for deprecated double trailing underscore
2439      error(filename, endif_linenum, 'build/header_guard', 0,
2440            '#endif line should be "#endif  // %s"' % cppvar)
2441    return
2442
2443  # Didn't find the corresponding "//" comment.  If this file does not
2444  # contain any "//" comments at all, it could be that the compiler
2445  # only wants "/**/" comments, look for those instead.
2446  no_single_line_comments = True
2447  for i in xrange(1, len(raw_lines) - 1):
2448    line = raw_lines[i]
2449    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
2450      no_single_line_comments = False
2451      break
2452
2453  if no_single_line_comments:
2454    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
2455    if match:
2456      if match.group(1) == '_':
2457        # Low severity warning for double trailing underscore
2458        error(filename, endif_linenum, 'build/header_guard', 0,
2459              '#endif line should be "#endif  /* %s */"' % cppvar)
2460      return
2461
2462  # Didn't find anything
2463  error(filename, endif_linenum, 'build/header_guard', 5,
2464        '#endif line should be "#endif  // %s"' % cppvar)
2465
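# The layout this check expects in a header (sketch; the guard name comes from
# GetHeaderGuardCPPVariable):
#   #ifndef CHROME_BROWSER_BROWSER_H_
#   #define CHROME_BROWSER_BROWSER_H_
#   ...
#   #endif  // CHROME_BROWSER_BROWSER_H_
# A '#pragma once' line or a NOLINT(build/header_guard) comment skips the
# check entirely, as handled above.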
2466
2467def CheckHeaderFileIncluded(filename, include_state, error):
2468  """Logs an error if a source file does not include its header."""
2469
2470  # Do not check test files
2471  fileinfo = FileInfo(filename)
2472  if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
2473    return
2474
2475  for ext in GetHeaderExtensions():
2476    basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
2477    headerfile = basefilename + '.' + ext
2478    if not os.path.exists(headerfile):
2479      continue
2480    headername = FileInfo(headerfile).RepositoryName()
2481    first_include = None
2482    include_uses_unix_dir_aliases = False
2483    for section_list in include_state.include_list:
2484      for f in section_list:
2485        include_text = f[0]
2486        if "./" in include_text:
2487          include_uses_unix_dir_aliases = True
2488        if headername in include_text or include_text in headername:
2489          return
2490        if not first_include:
2491          first_include = f[1]
2492
2493    message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
2494    if include_uses_unix_dir_aliases:
2495      message += ". Relative paths like . and .. are not allowed."
2496
2497    error(filename, first_include, 'build/include', 5, message)
2498
2499
2500def CheckForBadCharacters(filename, lines, error):
2501  """Logs an error for each line containing bad characters.
2502
2503  Two kinds of bad characters:
2504
2505  1. Unicode replacement characters: These indicate that either the file
2506  contained invalid UTF-8 (likely) or Unicode replacement characters (which
2507  it shouldn't).  Note that it's possible for this to throw off line
2508  numbering if the invalid UTF-8 occurred adjacent to a newline.
2509
2510  2. NUL bytes.  These are problematic for some tools.
2511
2512  Args:
2513    filename: The name of the current file.
2514    lines: An array of strings, each representing a line of the file.
2515    error: The function to call with any errors found.
2516  """
2517  for linenum, line in enumerate(lines):
2518    if unicode_escape_decode('\ufffd') in line:
2519      error(filename, linenum, 'readability/utf8', 5,
2520            'Line contains invalid UTF-8 (or Unicode replacement character).')
2521    if '\0' in line:
2522      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
2523
2524
2525def CheckForNewlineAtEOF(filename, lines, error):
2526  """Logs an error if there is no newline char at the end of the file.
2527
2528  Args:
2529    filename: The name of the current file.
2530    lines: An array of strings, each representing a line of the file.
2531    error: The function to call with any errors found.
2532  """
2533
2534  # The array lines() was created by adding two newlines to the
2535  # original file (go figure), then splitting on \n.
2536  # To verify that the file ends in \n, we just have to make sure the
2537  # last-but-two element of lines() exists and is empty.
2538  if len(lines) < 3 or lines[-2]:
2539    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
2540          'Could not find a newline character at the end of the file.')
2541
2542
2543def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
2544  """Logs an error if we see /* ... */ or "..." that extend past one line.
2545
2546  /* ... */ comments are legit inside macros, for one line.
2547  Otherwise, we prefer // comments, so it's ok to warn about the
2548  other.  Likewise, it's ok for strings to extend across multiple
2549  lines, as long as a line continuation character (backslash)
2550  terminates each line. Although not currently prohibited by the C++
2551  style guide, it's ugly and unnecessary. We don't do well with either
2552  in this lint program, so we warn about both.
2553
2554  Args:
2555    filename: The name of the current file.
2556    clean_lines: A CleansedLines instance containing the file.
2557    linenum: The number of the line to check.
2558    error: The function to call with any errors found.
2559  """
2560  line = clean_lines.elided[linenum]
2561
2562  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
2563  # second (escaped) slash may trigger later \" detection erroneously.
2564  line = line.replace('\\\\', '')
2565
2566  if line.count('/*') > line.count('*/'):
2567    error(filename, linenum, 'readability/multiline_comment', 5,
2568          'Complex multi-line /*...*/-style comment found. '
2569          'Lint may give bogus warnings.  '
2570          'Consider replacing these with //-style comments, '
2571          'with #if 0...#endif, '
2572          'or with more clearly structured multi-line comments.')
2573
2574  if (line.count('"') - line.count('\\"')) % 2:
2575    error(filename, linenum, 'readability/multiline_string', 5,
2576          'Multi-line string ("...") found.  This lint script doesn\'t '
2577          'do well with such strings, and may give bogus warnings.  '
2578          'Use C++11 raw strings or concatenation instead.')
2579
2580
2581# (non-threadsafe name, thread-safe alternative, validation pattern)
2582#
2583# The validation pattern is used to eliminate false positives such as:
2584#  _rand();               // false positive due to substring match.
2585#  ->rand();              // some member function rand().
2586#  ACMRandom rand(seed);  // some variable named rand.
2587#  ISAACRandom rand();    // another variable named rand.
2588#
2589# Basically we require the return value of these functions to be used
2590# in some expression context on the same line by matching on some
2591# operator before the function name.  This eliminates constructors and
2592# member function calls.
2593_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
2594_THREADING_LIST = (
2595    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
2596    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
2597    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
2598    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
2599    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
2600    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
2601    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
2602    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
2603    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
2604    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
2605    ('strtok(', 'strtok_r(',
2606     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
2607    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
2608    )
2609
2610
2611def CheckPosixThreading(filename, clean_lines, linenum, error):
2612  """Checks for calls to thread-unsafe functions.
2613
2614  Much code was originally written without consideration for
2615  multi-threading.  Engineers also rely on their old experience; many
2616  learned POSIX before the threading extensions were added.  These
2617  checks guide engineers toward thread-safe functions (when using
2618  POSIX directly).
2619
2620  Args:
2621    filename: The name of the current file.
2622    clean_lines: A CleansedLines instance containing the file.
2623    linenum: The number of the line to check.
2624    error: The function to call with any errors found.
2625  """
2626  line = clean_lines.elided[linenum]
2627  for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
2628    # Additional pattern matching check to confirm that this is the
2629    # function we are looking for
2630    if Search(pattern, line):
2631      error(filename, linenum, 'runtime/threadsafe_fn', 2,
2632            'Consider using ' + multithread_safe_func +
2633            '...) instead of ' + single_thread_func +
2634            '...) for improved thread safety.')
2635
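# Example of a C++ line this check would flag (illustrative):
#   buf = ctime(&t);   // '=' prefix + 'ctime(...)' matches the pattern
# cpplint would then suggest ctime_r(...) instead, per _THREADING_LIST above.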
2636
2637def CheckVlogArguments(filename, clean_lines, linenum, error):
2638  """Checks that VLOG() is only used for defining a logging level.
2639
2640  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
2641  VLOG(FATAL) are not.
2642
2643  Args:
2644    filename: The name of the current file.
2645    clean_lines: A CleansedLines instance containing the file.
2646    linenum: The number of the line to check.
2647    error: The function to call with any errors found.
2648  """
2649  line = clean_lines.elided[linenum]
2650  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
2651    error(filename, linenum, 'runtime/vlog', 5,
2652          'VLOG() should be used with numeric verbosity level.  '
2653          'Use LOG() if you want symbolic severity levels.')
2654
2655# Matches invalid increment: *count++, which moves the pointer instead of
2656# incrementing a value.
2657_RE_PATTERN_INVALID_INCREMENT = re.compile(
2658    r'^\s*\*\w+(\+\+|--);')
2659
2660
2661def CheckInvalidIncrement(filename, clean_lines, linenum, error):
2662  """Checks for invalid increment *count++.
2663
2664  For example, the following function:
2665  void increment_counter(int* count) {
2666    *count++;
2667  }
2668  is invalid, because it effectively does count++, moving the pointer, and
2669  should be replaced with ++*count, (*count)++ or *count += 1.
2670
2671  Args:
2672    filename: The name of the current file.
2673    clean_lines: A CleansedLines instance containing the file.
2674    linenum: The number of the line to check.
2675    error: The function to call with any errors found.
2676  """
2677  line = clean_lines.elided[linenum]
2678  if _RE_PATTERN_INVALID_INCREMENT.match(line):
2679    error(filename, linenum, 'runtime/invalid_increment', 5,
2680          'Changing pointer instead of value (or unused value of operator*).')
2681
2682
2683def IsMacroDefinition(clean_lines, linenum):
2684  if Search(r'^#define', clean_lines[linenum]):
2685    return True
2686
2687  if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
2688    return True
2689
2690  return False
2691
2692
2693def IsForwardClassDeclaration(clean_lines, linenum):
2694  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
2695
2696
2697class _BlockInfo(object):
2698  """Stores information about a generic block of code."""
2699
2700  def __init__(self, linenum, seen_open_brace):
2701    self.starting_linenum = linenum
2702    self.seen_open_brace = seen_open_brace
2703    self.open_parentheses = 0
2704    self.inline_asm = _NO_ASM
2705    self.check_namespace_indentation = False
2706
2707  def CheckBegin(self, filename, clean_lines, linenum, error):
2708    """Run checks that applies to text up to the opening brace.
2709
2710    This is mostly for checking the text after the class identifier
2711    and the "{", usually where the base class is specified.  For other
2712    blocks, there isn't much to check, so we always pass.
2713
2714    Args:
2715      filename: The name of the current file.
2716      clean_lines: A CleansedLines instance containing the file.
2717      linenum: The number of the line to check.
2718      error: The function to call with any errors found.
2719    """
2720    pass
2721
2722  def CheckEnd(self, filename, clean_lines, linenum, error):
2723    """Run checks that applies to text after the closing brace.
2724
2725    This is mostly used for checking end of namespace comments.
2726
2727    Args:
2728      filename: The name of the current file.
2729      clean_lines: A CleansedLines instance containing the file.
2730      linenum: The number of the line to check.
2731      error: The function to call with any errors found.
2732    """
2733    pass
2734
2735  def IsBlockInfo(self):
2736    """Returns true if this block is a _BlockInfo.
2737
2738    This is convenient for verifying that an object is an instance of
2739    a _BlockInfo, but not an instance of any of the derived classes.
2740
2741    Returns:
2742      True for this class, False for derived classes.
2743    """
2744    return self.__class__ == _BlockInfo
2745
2746
2747class _ExternCInfo(_BlockInfo):
2748  """Stores information about an 'extern "C"' block."""
2749
2750  def __init__(self, linenum):
2751    _BlockInfo.__init__(self, linenum, True)
2752
2753
2754class _ClassInfo(_BlockInfo):
2755  """Stores information about a class."""
2756
2757  def __init__(self, name, class_or_struct, clean_lines, linenum):
2758    _BlockInfo.__init__(self, linenum, False)
2759    self.name = name
2760    self.is_derived = False
2761    self.check_namespace_indentation = True
2762    if class_or_struct == 'struct':
2763      self.access = 'public'
2764      self.is_struct = True
2765    else:
2766      self.access = 'private'
2767      self.is_struct = False
2768
2769    # Remember initial indentation level for this class.  Using raw_lines here
2770    # instead of elided to account for leading comments.
2771    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
2772
2773    # Try to find the end of the class.  This will be confused by things like:
2774    #   class A {
2775    #   } *x = { ...
2776    #
2777    # But it's still good enough for CheckSectionSpacing.
2778    self.last_line = 0
2779    depth = 0
2780    for i in range(linenum, clean_lines.NumLines()):
2781      line = clean_lines.elided[i]
2782      depth += line.count('{') - line.count('}')
2783      if not depth:
2784        self.last_line = i
2785        break
2786
2787  def CheckBegin(self, filename, clean_lines, linenum, error):
2788    # Look for a bare ':'
2789    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
2790      self.is_derived = True
2791
2792  def CheckEnd(self, filename, clean_lines, linenum, error):
2793    # If there is a DISALLOW macro, it should appear near the end of
2794    # the class.
2795    seen_last_thing_in_class = False
2796    for i in xrange(linenum - 1, self.starting_linenum, -1):
2797      match = Search(
2798          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
2799          self.name + r'\)',
2800          clean_lines.elided[i])
2801      if match:
2802        if seen_last_thing_in_class:
2803          error(filename, i, 'readability/constructors', 3,
2804                match.group(1) + ' should be the last thing in the class')
2805        break
2806
2807      if not Match(r'^\s*$', clean_lines.elided[i]):
2808        seen_last_thing_in_class = True
2809
2810    # Check that closing brace is aligned with beginning of the class.
2811    # Only do this if the closing brace is indented by only whitespaces.
2812    # This means we will not check single-line class definitions.
2813    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
2814    if indent and len(indent.group(1)) != self.class_indent:
2815      if self.is_struct:
2816        parent = 'struct ' + self.name
2817      else:
2818        parent = 'class ' + self.name
2819      error(filename, linenum, 'whitespace/indent', 3,
2820            'Closing brace should be aligned with beginning of %s' % parent)
2821
2822
2823class _NamespaceInfo(_BlockInfo):
2824  """Stores information about a namespace."""
2825
2826  def __init__(self, name, linenum):
2827    _BlockInfo.__init__(self, linenum, False)
2828    self.name = name or ''
2829    self.check_namespace_indentation = True
2830
2831  def CheckEnd(self, filename, clean_lines, linenum, error):
2832    """Check end of namespace comments."""
2833    line = clean_lines.raw_lines[linenum]
2834
2835    # Check how many lines are enclosed in this namespace.  Don't issue
2836    # warning for missing namespace comments if there aren't enough
2837    # lines.  However, do apply checks if there is already an end of
2838    # namespace comment and it's incorrect.
2839    #
2840    # TODO(unknown): We always want to check end of namespace comments
2841    # if a namespace is large, but sometimes we also want to apply the
2842    # check if a short namespace contained nontrivial things (something
2843    # other than forward declarations).  There is currently no logic on
2844    # deciding what these nontrivial things are, so this check is
2845    # triggered by namespace size only, which works most of the time.
2846    if (linenum - self.starting_linenum < 10
2847        and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
2848      return
2849
2850    # Look for matching comment at end of namespace.
2851    #
2852    # Note that we accept C style "/* */" comments for terminating
2853    # namespaces, so that code that terminates namespaces inside
2854    # preprocessor macros can be cpplint clean.
2855    #
2856    # We also accept stuff like "// end of namespace <name>." with the
2857    # period at the end.
2858    #
2859    # Besides these, we don't accept anything else, otherwise we might
2860    # get false negatives when the existing comment is a substring of the
2861    # expected namespace.
2862    if self.name:
2863      # Named namespace
2864      if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
2865                    re.escape(self.name) + r'[\*/\.\\\s]*$'),
2866                   line):
2867        error(filename, linenum, 'readability/namespace', 5,
2868              'Namespace should be terminated with "// namespace %s"' %
2869              self.name)
2870    else:
2871      # Anonymous namespace
2872      if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
2873        # If "// namespace anonymous" or "// anonymous namespace (more text)",
2874        # mention "// anonymous namespace" as an acceptable form
2875        if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
2876          error(filename, linenum, 'readability/namespace', 5,
2877                'Anonymous namespace should be terminated with "// namespace"'
2878                ' or "// anonymous namespace"')
2879        else:
2880          error(filename, linenum, 'readability/namespace', 5,
2881                'Anonymous namespace should be terminated with "// namespace"')
2882
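# Namespace-terminating comments accepted by CheckEnd above (sketch):
#   }  // namespace mynamespace
#   }  /* namespace mynamespace */
#   }  // namespace             (anonymous namespace)
#   }  // anonymous namespace   (also accepted for anonymous namespaces)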
2883
2884class _PreprocessorInfo(object):
2885  """Stores checkpoints of nesting stacks when #if/#else is seen."""
2886
2887  def __init__(self, stack_before_if):
2888    # The entire nesting stack before #if
2889    self.stack_before_if = stack_before_if
2890
2891    # The entire nesting stack up to #else
2892    self.stack_before_else = []
2893
2894    # Whether we have already seen #else or #elif
2895    self.seen_else = False
2896
2897
2898class NestingState(object):
2899  """Holds states related to parsing braces."""
2900
2901  def __init__(self):
2902    # Stack for tracking all braces.  An object is pushed whenever we
2903    # see a "{", and popped when we see a "}".  Only 3 types of
2904    # objects are possible:
2905    # - _ClassInfo: a class or struct.
2906    # - _NamespaceInfo: a namespace.
2907    # - _BlockInfo: some other type of block.
2908    self.stack = []
2909
2910    # Top of the previous stack before each Update().
2911    #
2912    # Because the nesting_stack is updated at the end of each line, we
2913    # had to do some convoluted checks to find out what is the current
2914    # scope at the beginning of the line.  This check is simplified by
2915    # saving the previous top of nesting stack.
2916    #
2917    # We could save the full stack, but we only need the top.  Copying
2918    # the full nesting stack would slow down cpplint by ~10%.
2919    self.previous_stack_top = []
2920
2921    # Stack of _PreprocessorInfo objects.
2922    self.pp_stack = []
2923
2924  def SeenOpenBrace(self):
2925    """Check if we have seen the opening brace for the innermost block.
2926
2927    Returns:
2928      True if we have seen the opening brace, False if the innermost
2929      block is still expecting an opening brace.
2930    """
2931    return (not self.stack) or self.stack[-1].seen_open_brace
2932
2933  def InNamespaceBody(self):
2934    """Check if we are currently one level inside a namespace body.
2935
2936    Returns:
2937      True if top of the stack is a namespace block, False otherwise.
2938    """
2939    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
2940
2941  def InExternC(self):
2942    """Check if we are currently one level inside an 'extern "C"' block.
2943
2944    Returns:
2945      True if top of the stack is an extern block, False otherwise.
2946    """
2947    return self.stack and isinstance(self.stack[-1], _ExternCInfo)
2948
2949  def InClassDeclaration(self):
2950    """Check if we are currently one level inside a class or struct declaration.
2951
2952    Returns:
2953      True if top of the stack is a class/struct, False otherwise.
2954    """
2955    return self.stack and isinstance(self.stack[-1], _ClassInfo)
2956
2957  def InAsmBlock(self):
2958    """Check if we are currently one level inside an inline ASM block.
2959
2960    Returns:
2961      True if the top of the stack is a block containing inline ASM.
2962    """
2963    return self.stack and self.stack[-1].inline_asm != _NO_ASM
2964
2965  def InTemplateArgumentList(self, clean_lines, linenum, pos):
2966    """Check if current position is inside template argument list.
2967
2968    Args:
2969      clean_lines: A CleansedLines instance containing the file.
2970      linenum: The number of the line to check.
2971      pos: position just after the suspected template argument.
2972    Returns:
2973      True if (linenum, pos) is inside template arguments.
2974    """
2975    while linenum < clean_lines.NumLines():
2976      # Find the earliest character that might indicate a template argument
2977      line = clean_lines.elided[linenum]
2978      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
2979      if not match:
2980        linenum += 1
2981        pos = 0
2982        continue
2983      token = match.group(1)
2984      pos += len(match.group(0))
2985
2986      # These things do not look like template argument list:
2987      #   class Suspect {
2988      #   class Suspect x; }
2989      if token in ('{', '}', ';'): return False
2990
2991      # These things look like template argument list:
2992      #   template <class Suspect>
2993      #   template <class Suspect = default_value>
2994      #   template <class Suspect[]>
2995      #   template <class Suspect...>
2996      if token in ('>', '=', '[', ']', '.'): return True
2997
2998      # Check if token is an unmatched '<'.
2999      # If not, move on to the next character.
3000      if token != '<':
3001        pos += 1
3002        if pos >= len(line):
3003          linenum += 1
3004          pos = 0
3005        continue
3006
3007      # A single '<' on its own is ambiguous, so we need to find the
3008      # matching '>' to decide.
3009      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
3010      if end_pos < 0:
3011        # Not sure if template argument list or syntax error in file
3012        return False
3013      linenum = end_line
3014      pos = end_pos
3015    return False
3016
3017  def UpdatePreprocessor(self, line):
3018    """Update preprocessor stack.
3019
3020    We need to handle preprocessors due to classes like this:
3021      #ifdef SWIG
3022      struct ResultDetailsPageElementExtensionPoint {
3023      #else
3024      struct ResultDetailsPageElementExtensionPoint : public Extension {
3025      #endif
3026
3027    We make the following assumptions (good enough for most files):
3028    - Preprocessor condition evaluates to true from #if up to first
3029      #else/#elif/#endif.
3030
3031    - Preprocessor condition evaluates to false from #else/#elif up
3032      to #endif.  We still perform lint checks on these lines, but
3033      these do not affect nesting stack.
3034
3035    Args:
3036      line: current line to check.
3037    """
3038    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
3039      # Beginning of #if block, save the nesting stack here.  The saved
3040      # stack will allow us to restore the parsing state in the #else case.
3041      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
3042    elif Match(r'^\s*#\s*(else|elif)\b', line):
3043      # Beginning of #else block
3044      if self.pp_stack:
3045        if not self.pp_stack[-1].seen_else:
3046          # This is the first #else or #elif block.  Remember the
3047          # whole nesting stack up to this point.  This is what we
3048          # keep after the #endif.
3049          self.pp_stack[-1].seen_else = True
3050          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
3051
3052        # Restore the stack to how it was before the #if
3053        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
3054      else:
3055        # TODO(unknown): unexpected #else, issue warning?
3056        pass
3057    elif Match(r'^\s*#\s*endif\b', line):
3058      # End of #if or #else blocks.
3059      if self.pp_stack:
3060        # If we saw an #else, we will need to restore the nesting
3061        # stack to its former state before the #else, otherwise we
3062        # will just continue from where we left off.
3063        if self.pp_stack[-1].seen_else:
3064          # Here we can just use a shallow copy since we are the last
3065          # reference to it.
3066          self.stack = self.pp_stack[-1].stack_before_else
3067        # Drop the corresponding #if
3068        self.pp_stack.pop()
3069      else:
3070        # TODO(unknown): unexpected #endif, issue warning?
3071        pass
3072
3073  # TODO(unknown): Update() is too long, but we will refactor later.
3074  def Update(self, filename, clean_lines, linenum, error):
3075    """Update nesting state with current line.
3076
3077    Args:
3078      filename: The name of the current file.
3079      clean_lines: A CleansedLines instance containing the file.
3080      linenum: The number of the line to check.
3081      error: The function to call with any errors found.
3082    """
3083    line = clean_lines.elided[linenum]
3084
3085    # Remember top of the previous nesting stack.
3086    #
3087    # The stack is always pushed/popped and not modified in place, so
3088    # we can just do a shallow copy instead of copy.deepcopy.  Using
3089    # deepcopy would slow down cpplint by ~28%.
3090    if self.stack:
3091      self.previous_stack_top = self.stack[-1]
3092    else:
3093      self.previous_stack_top = None
3094
3095    # Update pp_stack
3096    self.UpdatePreprocessor(line)
3097
3098    # Count parentheses.  This is to avoid adding struct arguments to
3099    # the nesting stack.
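    # Note: the elided line already has comments and string contents stripped,
    # so parentheses inside strings or comments do not affect the count.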
3100    if self.stack:
3101      inner_block = self.stack[-1]
3102      depth_change = line.count('(') - line.count(')')
3103      inner_block.open_parentheses += depth_change
3104
3105      # Also check if we are starting or ending an inline assembly block.
3106      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
3107        if (depth_change != 0 and
3108            inner_block.open_parentheses == 1 and
3109            _MATCH_ASM.match(line)):
3110          # Enter assembly block
3111          inner_block.inline_asm = _INSIDE_ASM
3112        else:
3113          # Not entering assembly block.  If previous line was _END_ASM,
3114          # we will now shift to _NO_ASM state.
3115          inner_block.inline_asm = _NO_ASM
3116      elif (inner_block.inline_asm == _INSIDE_ASM and
3117            inner_block.open_parentheses == 0):
3118        # Exit assembly block
3119        inner_block.inline_asm = _END_ASM
3120
3121    # Consume namespace declaration at the beginning of the line.  Do
3122    # this in a loop so that we catch same line declarations like this:
3123    #   namespace proto2 { namespace bridge { class MessageSet; } }
3124    while True:
3125      # Match start of namespace.  The "\b\s*" below catches namespace
3126      # declarations even when they are not followed by whitespace, so
3127      # that we don't confuse our namespace checker.  The missing spaces
3128      # will be flagged by CheckSpacing.
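      # Examples matched here include "namespace foo {", the nested form
      # "namespace foo::bar {", and the anonymous "namespace {".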
3129      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
3130      if not namespace_decl_match:
3131        break
3132
3133      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
3134      self.stack.append(new_namespace)
3135
3136      line = namespace_decl_match.group(2)
3137      if line.find('{') != -1:
3138        new_namespace.seen_open_brace = True
3139        line = line[line.find('{') + 1:]
3140
3141    # Look for a class declaration in whatever is left of the line
3142    # after parsing namespaces.  The regexp accounts for decorated classes
3143    # such as in:
3144    #   class LOCKABLE API Object {
3145    #   };
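    # Qualified and templated declarations are matched as well, e.g.:
    #   template <typename T> class Foo {
    #   struct Outer::Inner {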
3146    class_decl_match = Match(
3147        r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
3148        r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
3149        r'(.*)$', line)
3150    if (class_decl_match and
3151        (not self.stack or self.stack[-1].open_parentheses == 0)):
3152      # We do not want to accept classes that are actually template arguments:
3153      #   template <class Ignore1,
3154      #             class Ignore2 = Default<Args>,
3155      #             template <Args> class Ignore3>
3156      #   void Function() {};
3157      #
3158      # To avoid template argument cases, we scan forward and look for
3159      # an unmatched '>'.  If we see one, assume we are inside a
3160      # template argument list.
3161      end_declaration = len(class_decl_match.group(1))
3162      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
3163        self.stack.append(_ClassInfo(
3164            class_decl_match.group(3), class_decl_match.group(2),
3165            clean_lines, linenum))
3166        line = class_decl_match.group(4)
3167
3168    # If we have not yet seen the opening brace for the innermost block,
3169    # run checks here.
3170    if not self.SeenOpenBrace():
3171      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
3172
3173    # Update access control if we are inside a class/struct
3174    if self.stack and isinstance(self.stack[-1], _ClassInfo):
3175      classinfo = self.stack[-1]
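      # Matches access specifiers such as "public:", "protected:", and the
      # Qt-style "signals:" and "public slots:".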
3176      access_match = Match(
3177          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
3178          r':(?:[^:]|$)',
3179          line)
3180      if access_match:
3181        classinfo.access = access_match.group(2)
3182
3183        # Check that access keywords are indented +1 space.  Skip this
3184        # check if the keywords are not preceded by whitespaces.
3185        indent = access_match.group(1)
3186        if (len(indent) != classinfo.class_indent + 1 and
3187            Match(r'^\s*$', indent)):
3188          if classinfo.is_struct:
3189            parent = 'struct ' + classinfo.name
3190          else:
3191            parent = 'class ' + classinfo.name
3192          slots = ''
3193          if access_match.group(3):
3194            slots = access_match.group(3)
3195          error(filename, linenum, 'whitespace/indent', 3,
3196                '%s%s: should be indented +1 space inside %s' % (
3197                    access_match.group(2), slots, parent))
3198
3199    # Consume braces or semicolons from what's left of the line
3200    while True:
3201      # Match first brace, semicolon, or closed parenthesis.
3202      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
3203      if not matched:
3204        break
3205
3206      token = matched.group(1)
3207      if token == '{':
3208        # If the namespace or class hasn't seen an opening brace yet, mark
3209        # namespace/class head as complete.  Push a new block onto the
3210        # stack otherwise.
3211        if not self.SeenOpenBrace():
3212          self.stack[-1].seen_open_brace = True
3213        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
3214          self.stack.append(_ExternCInfo(linenum))
3215        else:
3216          self.stack.append(_BlockInfo(linenum, True))
3217          if _MATCH_ASM.match(line):
3218            self.stack[-1].inline_asm = _BLOCK_ASM
3219
3220      elif token == ';' or token == ')':
3221        # If we haven't seen an opening brace yet, but we already saw
3222        # a semicolon, this is probably a forward declaration.  Pop
3223        # the stack for these.
3224        #
3225        # Similarly, if we haven't seen an opening brace yet, but we
3226        # already saw a closing parenthesis, then these are probably
3227        # function arguments with extra "class" or "struct" keywords.
3228        # Also pop the stack for these.
3229        if not self.SeenOpenBrace():
3230          self.stack.pop()
3231      else:  # token == '}'
3232        # Perform end of block checks and pop the stack.
3233        if self.stack:
3234          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
3235          self.stack.pop()
3236      line = matched.group(2)
3237
3238  def InnermostClass(self):
3239    """Get class info on the top of the stack.
3240
3241    Returns:
3242      A _ClassInfo object if we are inside a class, or None otherwise.
3243    """
3244    for i in range(len(self.stack), 0, -1):
3245      classinfo = self.stack[i - 1]
3246      if isinstance(classinfo, _ClassInfo):
3247        return classinfo
3248    return None
3249
3250  def CheckCompletedBlocks(self, filename, error):
3251    """Checks that all classes and namespaces have been completely parsed.
3252
3253    Call this when all lines in a file have been processed.
3254    Args:
3255      filename: The name of the current file.
3256      error: The function to call with any errors found.
3257    """
3258    # Note: This test can result in false positives if #ifdef constructs
3259    # get in the way of brace matching. See the testBuildClass test in
3260    # cpplint_unittest.py for an example of this.
3261    for obj in self.stack:
3262      if isinstance(obj, _ClassInfo):
3263        error(filename, obj.starting_linenum, 'build/class', 5,
3264              'Failed to find complete declaration of class %s' %
3265              obj.name)
3266      elif isinstance(obj, _NamespaceInfo):
3267        error(filename, obj.starting_linenum, 'build/namespaces', 5,
3268              'Failed to find complete declaration of namespace %s' %
3269              obj.name)
3270
3271
3272def CheckForNonStandardConstructs(filename, clean_lines, linenum,
3273                                  nesting_state, error):
3274  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
3275
3276  Complain about several constructs which gcc-2 accepts, but which are
3277  not standard C++.  Warning about these in lint is one way to ease the
3278  transition to new compilers.
3279  - put storage class first (e.g. "static const" instead of "const static").
3280  - "%lld" instead of "%qd" in printf-type functions.
3281  - "%1$d" is non-standard in printf-type functions.
3282  - "\%" is an undefined character escape sequence.
3283  - text after #endif is not allowed.
3284  - invalid inner-style forward declaration.
3285  - >? and <? operators, and their >?= and <?= cousins.
3286
3287  Additionally, check for constructor/destructor style violations and reference
3288  members, as it is very convenient to do so while checking for
3289  gcc-2 compliance.
3290
3291  Args:
3292    filename: The name of the current file.
3293    clean_lines: A CleansedLines instance containing the file.
3294    linenum: The number of the line to check.
3295    nesting_state: A NestingState instance which maintains information about
3296                   the current stack of nested blocks being parsed.
3297    error: A callable to which errors are reported, which takes 4 arguments:
3298           filename, line number, error level, and message
3299  """
3300
3301  # Remove comments from the line, but leave in strings for now.
3302  line = clean_lines.lines[linenum]
3303
3304  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
3305    error(filename, linenum, 'runtime/printf_format', 3,
3306          '%q in format strings is deprecated.  Use %ll instead.')
3307
3308  if Search(r'printf\s*\(.*".*%\d+\$', line):
3309    error(filename, linenum, 'runtime/printf_format', 2,
3310          '%N$ formats are unconventional.  Try rewriting to avoid them.')
3311
3312  # Remove escaped backslashes before looking for undefined escapes.
3313  line = line.replace('\\\\', '')
3314
3315  if Search(r'("|\').*\\(%|\[|\(|{)', line):
3316    error(filename, linenum, 'build/printf_format', 3,
3317          '%, [, (, and { are undefined character escapes.  Unescape them.')
3318
3319  # For the rest, work with both comments and strings removed.
3320  line = clean_lines.elided[linenum]
3321
3322  if Search(r'\b(const|volatile|void|char|short|int|long'
3323            r'|float|double|signed|unsigned'
3324            r'|schar|u?int8|u?int16|u?int32|u?int64)'
3325            r'\s+(register|static|extern|typedef)\b',
3326            line):
3327    error(filename, linenum, 'build/storage_class', 5,
3328          'Storage-class specifier (static, extern, typedef, etc) should be '
3329          'at the beginning of the declaration.')
3330
3331  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
3332    error(filename, linenum, 'build/endif_comment', 5,
3333          'Uncommented text after #endif is non-standard.  Use a comment.')
3334
3335  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
3336    error(filename, linenum, 'build/forward_decl', 5,
3337          'Inner-style forward declarations are invalid.  Remove this line.')
3338
3339  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
3340            line):
3341    error(filename, linenum, 'build/deprecated', 3,
3342          '>? and <? (max and min) operators are non-standard and deprecated.')
3343
3344  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
3345    # TODO(unknown): Could it be expanded safely to arbitrary references,
3346    # without triggering too many false positives? The first
3347    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
3348    # the restriction.
3349    # Here's the original regexp, for reference:
3350    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
3351    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
3352    error(filename, linenum, 'runtime/member_string_references', 2,
3353          'const string& members are dangerous. It is much better to use '
3354          'alternatives, such as pointers or simple constants.')
3355
3356  # Everything else in this function operates on class declarations.
3357  # Return early if the top of the nesting stack is not a class, or if
3358  # the class head is not completed yet.
3359  classinfo = nesting_state.InnermostClass()
3360  if not classinfo or not classinfo.seen_open_brace:
3361    return
3362
3363  # The class may have been declared with namespace or classname qualifiers.
3364  # The constructor and destructor will not have those qualifiers.
3365  base_classname = classinfo.name.split('::')[-1]
3366
3367  # Look for single-argument constructors that aren't marked explicit.
3368  # Technically a valid construct, but against style.
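  # For a class named Foo, the pattern below matches member declarations
  # such as:
  #   Foo(int x);
  #   inline explicit Foo(const Bar& b);
  #   constexpr Foo(std::initializer_list<int> list);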
3369  explicit_constructor_match = Match(
3370      r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
3371      r'(?:(?:inline|constexpr)\s+)*%s\s*'
3372      r'\(((?:[^()]|\([^()]*\))*)\)'
3373      % re.escape(base_classname),
3374      line)
3375
3376  if explicit_constructor_match:
3377    is_marked_explicit = explicit_constructor_match.group(1)
3378
3379    if not explicit_constructor_match.group(2):
3380      constructor_args = []
3381    else:
3382      constructor_args = explicit_constructor_match.group(2).split(',')
3383
3384    # Collapse arguments so that commas in template parameter lists and
3385    # function argument lists don't split a single argument in two.
3386    i = 0
3387    while i < len(constructor_args):
3388      constructor_arg = constructor_args[i]
3389      while (constructor_arg.count('<') > constructor_arg.count('>') or
3390             constructor_arg.count('(') > constructor_arg.count(')')):
3391        constructor_arg += ',' + constructor_args[i + 1]
3392        del constructor_args[i + 1]
3393      constructor_args[i] = constructor_arg
3394      i += 1
3395
3396    variadic_args = [arg for arg in constructor_args if '&&...' in arg]
3397    defaulted_args = [arg for arg in constructor_args if '=' in arg]
3398    noarg_constructor = (not constructor_args or  # empty arg list
3399                         # 'void' arg specifier
3400                         (len(constructor_args) == 1 and
3401                          constructor_args[0].strip() == 'void'))
3402    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
3403                           not noarg_constructor) or
3404                          # all but at most one arg defaulted
3405                          (len(constructor_args) >= 1 and
3406                           not noarg_constructor and
3407                           len(defaulted_args) >= len(constructor_args) - 1) or
3408                          # variadic arguments with zero or one argument
3409                          (len(constructor_args) <= 2 and
3410                           len(variadic_args) >= 1))
3411    initializer_list_constructor = bool(
3412        onearg_constructor and
3413        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
3414    copy_constructor = bool(
3415        onearg_constructor and
3416        Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
3417              r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
3418              % re.escape(base_classname), constructor_args[0].strip()))
3419
3420    if (not is_marked_explicit and
3421        onearg_constructor and
3422        not initializer_list_constructor and
3423        not copy_constructor):
3424      if defaulted_args or variadic_args:
3425        error(filename, linenum, 'runtime/explicit', 5,
3426              'Constructors callable with one argument '
3427              'should be marked explicit.')
3428      else:
3429        error(filename, linenum, 'runtime/explicit', 5,
3430              'Single-parameter constructors should be marked explicit.')
3431    elif is_marked_explicit and not onearg_constructor:
3432      if noarg_constructor:
3433        error(filename, linenum, 'runtime/explicit', 5,
3434              'Zero-parameter constructors should not be marked explicit.')
3435
3436
3437def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
3438  """Checks for the correctness of various spacing around function calls.
3439
3440  Args:
3441    filename: The name of the current file.
3442    clean_lines: A CleansedLines instance containing the file.
3443    linenum: The number of the line to check.
3444    error: The function to call with any errors found.
3445  """
3446  line = clean_lines.elided[linenum]
3447
3448  # Since function calls often occur inside if/for/while/switch
3449  # expressions - which have their own, more liberal conventions - we
3450  # first see if we should be looking inside such an expression for a
3451  # function call, to which we can apply more strict standards.
3452  fncall = line    # if there's no control flow construct, look at whole line
3453  for pattern in (r'\bif\s*\((.*)\)\s*{',
3454                  r'\bfor\s*\((.*)\)\s*{',
3455                  r'\bwhile\s*\((.*)\)\s*[{;]',
3456                  r'\bswitch\s*\((.*)\)\s*{'):
3457    match = Search(pattern, line)
3458    if match:
3459      fncall = match.group(1)    # look inside the parens for function calls
3460      break
3461
3462  # Except in if/for/while/switch, there should never be a space
3463  # immediately inside parens (e.g. "f( 3, 4 )").  We make an exception
3464  # for nested parens ( (a+b) + c ).  Likewise, there should never be
3465  # a space before a ( when it's a function argument.  I assume it's a
3466  # function argument when the char before the whitespace is legal in
3467  # a function name (alnum + _) and we're not starting a macro. Also ignore
3468  # pointers and references to arrays and functions because they're too tricky:
3469  # we use a very simple way to recognize these:
3470  # " (something)(maybe-something)" or
3471  # " (something)(maybe-something," or
3472  # " (something)[something]"
3473  # Note that we assume the contents of [] to be short enough that
3474  # they'll never need to wrap.
3475  if (  # Ignore control structures.
3476      not Search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b',
3477                 fncall) and
3478      # Ignore pointers/references to functions.
3479      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
3480      # Ignore pointers/references to arrays.
3481      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
3482    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
3483      error(filename, linenum, 'whitespace/parens', 4,
3484            'Extra space after ( in function call')
3485    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
3486      error(filename, linenum, 'whitespace/parens', 2,
3487            'Extra space after (')
3488    if (Search(r'\w\s+\(', fncall) and
3489        not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
3490        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
3491        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
3492        not Search(r'\bcase\s+\(', fncall)):
3493      # TODO(unknown): Space after an operator function seems to be a common
3494      # error; silence those for now by restricting them to the highest verbosity.
3495      if Search(r'\boperator_*\b', line):
3496        error(filename, linenum, 'whitespace/parens', 0,
3497              'Extra space before ( in function call')
3498      else:
3499        error(filename, linenum, 'whitespace/parens', 4,
3500              'Extra space before ( in function call')
3501    # If the ) is followed only by a newline or a { + newline, assume it's
3502    # part of a control statement (if/while/etc), and don't complain
3503    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
3504      # If the closing parenthesis is preceded by only whitespaces,
3505      # try to give a more descriptive error message.
3506      if Search(r'^\s+\)', fncall):
3507        error(filename, linenum, 'whitespace/parens', 2,
3508              'Closing ) should be moved to the previous line')
3509      else:
3510        error(filename, linenum, 'whitespace/parens', 2,
3511              'Extra space before )')
3512
3513
3514def IsBlankLine(line):
3515  """Returns true if the given line is blank.
3516
3517  We consider a line to be blank if the line is empty or consists of
3518  only white spaces.
3519
3520  Args:
3521    line: A single line of the file.
3522
3523  Returns:
3524    True, if the given line is blank.
3525  """
3526  return not line or line.isspace()
3527
3528
3529def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
3530                                 error):
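  """Checks indentation of items that appear directly inside a namespace body.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """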
3531  is_namespace_indent_item = (
3532      len(nesting_state.stack) > 1 and
3533      nesting_state.stack[-1].check_namespace_indentation and
3534      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
3535      nesting_state.previous_stack_top == nesting_state.stack[-2])
3536
3537  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
3538                                     clean_lines.elided, line):
3539    CheckItemIndentationInNamespace(filename, clean_lines.elided,
3540                                    line, error)
3541
3542
3543def CheckForFunctionLengths(filename, clean_lines, linenum,
3544                            function_state, error):
3545  """Reports long function bodies.
3546
3547  For an overview why this is done, see:
3548  https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
3549
3550  Uses a simplistic algorithm assuming other style guidelines
3551  (especially spacing) are followed.
3552  Only checks unindented functions, so class members are unchecked.
3553  Trivial bodies are unchecked, so constructors with huge initializer lists
3554  may be missed.
3555  Blank/comment lines are not counted so as to avoid encouraging the removal
3556  of vertical space and comments just to get through a lint check.
3557  NOLINT *on the last line of a function* disables this check.
3558
3559  Args:
3560    filename: The name of the current file.
3561    clean_lines: A CleansedLines instance containing the file.
3562    linenum: The number of the line to check.
3563    function_state: Current function name and lines in body so far.
3564    error: The function to call with any errors found.
3565  """
3566  lines = clean_lines.lines
3567  line = lines[linenum]
3568  joined_line = ''
3569
3570  starting_func = False
3571  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
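  # e.g. for "void Foo::Bar(int x) {" group 1 captures "void Foo::Bar".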
3572  match_result = Match(regexp, line)
3573  if match_result:
3574    # If the name is all caps and underscores, figure it's a macro and
3575    # ignore it, unless it's TEST or TEST_F.
3576    function_name = match_result.group(1).split()[-1]
3577    if function_name == 'TEST' or function_name == 'TEST_F' or (
3578        not Match(r'[A-Z_]+$', function_name)):
3579      starting_func = True
3580
3581  if starting_func:
3582    body_found = False
3583    for start_linenum in xrange(linenum, clean_lines.NumLines()):
3584      start_line = lines[start_linenum]
3585      joined_line += ' ' + start_line.lstrip()
3586      if Search(r'(;|})', start_line):  # Declarations and trivial functions
3587        body_found = True
3588        break                              # ... ignore
3589      if Search(r'{', start_line):
3590        body_found = True
3591        function = Search(r'((\w|:)*)\(', line).group(1)
3592        if Match(r'TEST', function):    # Handle TEST... macros
3593          parameter_regexp = Search(r'(\(.*\))', joined_line)
3594          if parameter_regexp:             # Ignore bad syntax
3595            function += parameter_regexp.group(1)
3596        else:
3597          function += '()'
3598        function_state.Begin(function)
3599        break
3600    if not body_found:
3601      # No body for the function (or evidence of a non-function) was found.
3602      error(filename, linenum, 'readability/fn_size', 5,
3603            'Lint failed to find start of function body.')
3604  elif Match(r'^\}\s*$', line):  # function end
3605    function_state.Check(error, filename, linenum)
3606    function_state.End()
3607  elif not Match(r'^\s*$', line):
3608    function_state.Count()  # Count non-blank/non-comment lines.
3609
3610
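# Matches the start of a TODO comment such as "// TODO(user): fix this".
# Group 1 is the whitespace after "//", group 2 the optional "(user)" part,
# and group 3 the whitespace (if any) following the optional colon.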
3611_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
3612
3613
3614def CheckComment(line, filename, linenum, next_line_start, error):
3615  """Checks for common mistakes in comments.
3616
3617  Args:
3618    line: The line in question.
3619    filename: The name of the current file.
3620    linenum: The number of the line to check.
3621    next_line_start: The first non-whitespace column of the next line.
3622    error: The function to call with any errors found.
3623  """
3624  commentpos = line.find('//')
3625  if commentpos != -1:
3626    # Check if the // may be in quotes.  If so, ignore it
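    # An even number of unescaped quote characters before the "//" means the
    # "//" is outside a string literal, e.g.:
    #   x = kFoo;  // comment             <- checked below
    #   url = "http://example.com";       <- the "//" here is skipped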
3627    if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
3628      # Allow one space for new scopes, two spaces otherwise:
3629      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
3630          ((commentpos >= 1 and
3631            line[commentpos-1] not in string.whitespace) or
3632           (commentpos >= 2 and
3633            line[commentpos-2] not in string.whitespace))):
3634        error(filename, linenum, 'whitespace/comments', 2,
3635              'At least two spaces is best between code and comments')
3636
3637      # Checks for common mistakes in TODO comments.
3638      comment = line[commentpos:]
3639      match = _RE_PATTERN_TODO.match(comment)
3640      if match:
3641        # One whitespace is correct; zero whitespace is handled elsewhere.
3642        leading_whitespace = match.group(1)
3643        if len(leading_whitespace) > 1:
3644          error(filename, linenum, 'whitespace/todo', 2,
3645                'Too many spaces before TODO')
3646
3647        username = match.group(2)
3648        if not username:
3649          error(filename, linenum, 'readability/todo', 2,
3650                'Missing username in TODO; it should look like '
3651                '"// TODO(my_username): Stuff."')
3652
3653        middle_whitespace = match.group(3)
3654        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
3655        if middle_whitespace != ' ' and middle_whitespace != '':
3656          error(filename, linenum, 'whitespace/todo', 2,
3657                'TODO(my_username) should be followed by a space')
3658
3659      # If the comment contains an alphanumeric character, there
3660      # should be a space somewhere between it and the // unless
3661      # it's a /// or //! Doxygen comment.
3662      if (Match(r'//[^ ]*\w', comment) and
3663          not Match(r'(///|//\!)(\s+|$)', comment)):
3664        error(filename, linenum, 'whitespace/comments', 4,
3665              'Should have a space between // and comment')
3666
3667
3668def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
3669  """Checks for the correctness of various spacing issues in the code.
3670
3671  Things we check for: spaces around operators, spaces after
3672  if/for/while/switch, no spaces around parens in function calls, two
3673  spaces between code and comment, don't start a block with a blank
3674  line, don't end a function with a blank line, don't add a blank line
3675  after public/protected/private, don't have too many blank lines in a row.
3676
3677  Args:
3678    filename: The name of the current file.
3679    clean_lines: A CleansedLines instance containing the file.
3680    linenum: The number of the line to check.
3681    nesting_state: A NestingState instance which maintains information about
3682                   the current stack of nested blocks being parsed.
3683    error: The function to call with any errors found.
3684  """
3685
3686  # Don't use "elided" lines here, otherwise we can't check commented lines.
3687  # We don't want to use "raw" either, because we don't want to check inside
3688  # C++11 raw strings.
3689  raw = clean_lines.lines_without_raw_strings
3690  line = raw[linenum]
3691
3692  # Before nixing comments, check if the line is blank for no good
3693  # reason.  This includes the first line after a block is opened, and
3694  # blank lines at the end of a function (i.e., right before a line like '}').
3695  #
3696  # Skip all the blank line checks if we are immediately inside a
3697  # namespace body.  In other words, don't issue blank line warnings
3698  # for this block:
3699  #   namespace {
3700  #
3701  #   }
3702  #
3703  # A warning about missing end of namespace comments will be issued instead.
3704  #
3705  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
3706  # like namespaces.
3707  if (IsBlankLine(line) and
3708      not nesting_state.InNamespaceBody() and
3709      not nesting_state.InExternC()):
3710    elided = clean_lines.elided
3711    prev_line = elided[linenum - 1]
3712    prevbrace = prev_line.rfind('{')
3713    # TODO(unknown): Don't complain if line before blank line, and line after,
3714    #                both start with alnums and are indented the same amount.
3715    #                This ignores whitespace at the start of a namespace block
3716    #                because those are not usually indented.
3717    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
3718      # OK, we have a blank line at the start of a code block.  Before we
3719      # complain, we check if it is an exception to the rule: The previous
3720      # non-empty line has the parameters of a function header that are indented
3721      # 4 spaces (because they did not fit in an 80 column line when placed on
3722      # the same line as the function name).  We also check for the case where
3723      # the previous line is indented 6 spaces, which may happen when the
3724      # initializers of a constructor do not fit into an 80 column line.
3725      exception = False
3726      if Match(r' {6}\w', prev_line):  # Initializer list?
3727        # We are looking for the opening column of initializer list, which
3728        # should be indented 4 spaces to cause 6 space indentation afterwards.
3729        search_position = linenum-2
3730        while (search_position >= 0
3731               and Match(r' {6}\w', elided[search_position])):
3732          search_position -= 1
3733        exception = (search_position >= 0
3734                     and elided[search_position][:5] == '    :')
3735      else:
3736        # Search for the function arguments or an initializer list.  We use a
3737        # simple heuristic here: if the line is indented 4 spaces and we have a
3738        # closing paren without the opening paren, followed by an opening brace
3739        # or colon (for initializer lists), we assume that it is the last line of
3740        # a function header.  If we have a colon indented 4 spaces, it is an
3741        # initializer list.
3742        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
3743                           prev_line)
3744                     or Match(r' {4}:', prev_line))
3745
3746      if not exception:
3747        error(filename, linenum, 'whitespace/blank_line', 2,
3748              'Redundant blank line at the start of a code block '
3749              'should be deleted.')
3750    # Ignore blank lines at the end of a block in a long if-else
3751    # chain, like this:
3752    #   if (condition1) {
3753    #     // Something followed by a blank line
3754    #
3755    #   } else if (condition2) {
3756    #     // Something else
3757    #   }
3758    if linenum + 1 < clean_lines.NumLines():
3759      next_line = raw[linenum + 1]
3760      if (next_line
3761          and Match(r'\s*}', next_line)
3762          and next_line.find('} else ') == -1):
3763        error(filename, linenum, 'whitespace/blank_line', 3,
3764              'Redundant blank line at the end of a code block '
3765              'should be deleted.')
3766
3767    matched = Match(r'\s*(public|protected|private):', prev_line)
3768    if matched:
3769      error(filename, linenum, 'whitespace/blank_line', 3,
3770            'Do not leave a blank line after "%s:"' % matched.group(1))
3771
3772  # Next, check comments
3773  next_line_start = 0
3774  if linenum + 1 < clean_lines.NumLines():
3775    next_line = raw[linenum + 1]
3776    next_line_start = len(next_line) - len(next_line.lstrip())
3777  CheckComment(line, filename, linenum, next_line_start, error)
3778
3779  # get rid of comments and strings
3780  line = clean_lines.elided[linenum]
3781
3782  # You shouldn't have spaces before your brackets, except for C++11 attributes
3783  # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
3784  if (Search(r'\w\s+\[(?!\[)', line) and
3785      not Search(r'(?:auto&?|delete|return)\s+\[', line)):
3786    error(filename, linenum, 'whitespace/braces', 5,
3787          'Extra space before [')
3788
3789  # In range-based for, we want spaces before and after the colon, but
3790  # not around "::" tokens that might appear.
3791  if (Search(r'for *\(.*[^:]:[^: ]', line) or
3792      Search(r'for *\(.*[^: ]:[^:]', line)):
3793    error(filename, linenum, 'whitespace/forcolon', 2,
3794          'Missing space around colon in range-based for loop')
3795
3796
3797def CheckOperatorSpacing(filename, clean_lines, linenum, error):
3798  """Checks for horizontal spacing around operators.
3799
3800  Args:
3801    filename: The name of the current file.
3802    clean_lines: A CleansedLines instance containing the file.
3803    linenum: The number of the line to check.
3804    error: The function to call with any errors found.
3805  """
3806  line = clean_lines.elided[linenum]
3807
3808  # Don't try to do spacing checks for operator methods.  Do this by
3809  # replacing the troublesome characters with something else,
3810  # preserving column position for all other characters.
3811  #
3812  # The replacement is done repeatedly to avoid false positives from
3813  # operators that call operators.
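  # For example, "bool operator==(const Foo& a, const Foo& b) {" is rewritten
  # as "bool operator__(const Foo& a, const Foo& b) {" before the checks below.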
3814  while True:
3815    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
3816    if match:
3817      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
3818    else:
3819      break
3820
3821  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
3822  # Otherwise not.  Note we only check for non-spaces on *both* sides;
3823  # sometimes people put non-spaces on one side when aligning ='s among
3824  # many lines (not that this is behavior that I approve of...)
3825  if ((Search(r'[\w.]=', line) or
3826       Search(r'=[\w.]', line))
3827      and not Search(r'\b(if|while|for) ', line)
3828      # Operators taken from [lex.operators] in C++11 standard.
3829      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
3830      and not Search(r'operator=', line)):
3831    error(filename, linenum, 'whitespace/operators', 4,
3832          'Missing spaces around =')
3833
3834  # It's ok not to have spaces around binary operators like + - * /, but if
3835  # there's too little whitespace, we get concerned.  It's hard to tell,
3836  # though, so we punt on this one for now.  TODO.
3837
3838  # You should always have whitespace around binary operators.
3839  #
3840  # Check <= and >= first to avoid false positives with < and >, then
3841  # check non-include lines for spacing around < and >.
3842  #
3843  # If the operator is followed by a comma, assume it's being used in a
3844  # macro context and don't do any checks.  This avoids false
3845  # positives.
3846  #
3847  # Note that && is not included here.  This is because there are too
3848  # many false positives due to RValue references.
3849  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
3850  if match:
3851    error(filename, linenum, 'whitespace/operators', 3,
3852          'Missing spaces around %s' % match.group(1))
3853  elif not Match(r'#.*include', line):
3854    # Look for < that is not surrounded by spaces.  This is only
3855    # triggered if both sides are missing spaces, even though
3856    # technically we should flag it if at least one side is missing a
3857    # space.  This is done to avoid some false positives with shifts.
3858    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
3859    if match:
3860      (_, _, end_pos) = CloseExpression(
3861          clean_lines, linenum, len(match.group(1)))
3862      if end_pos <= -1:
3863        error(filename, linenum, 'whitespace/operators', 3,
3864              'Missing spaces around <')
3865
3866    # Look for > that is not surrounded by spaces.  Similar to the
3867    # above, we only trigger if both sides are missing spaces to avoid
3868    # false positives with shifts.
3869    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
3870    if match:
3871      (_, _, start_pos) = ReverseCloseExpression(
3872          clean_lines, linenum, len(match.group(1)))
3873      if start_pos <= -1:
3874        error(filename, linenum, 'whitespace/operators', 3,
3875              'Missing spaces around >')
3876
3877  # We allow no-spaces around << when used like this: 10<<20, but
3878  # not otherwise (particularly, not when used as streams)
3879  #
3880  # We also allow operators following an opening parenthesis, since
3881  # those tend to be macros that deal with operators.
3882  match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
3883  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
3884      not (match.group(1) == 'operator' and match.group(2) == ';')):
3885    error(filename, linenum, 'whitespace/operators', 3,
3886          'Missing spaces around <<')
3887
3888  # We allow no-spaces around >> for almost anything.  This is because
3889  # C++11 allows ">>" to close nested templates, which accounts for
3890  # most cases when ">>" is not followed by a space.
3891  #
3892  # We still warn on ">>" followed by alpha character, because that is
3893  # likely due to ">>" being used for right shifts, e.g.:
3894  #   value >> alpha
3895  #
3896  # When ">>" is used to close templates, the alphanumeric letter that
3897  # follows would be part of an identifier, and there should still be
3898  # a space separating the template type and the identifier.
3899  #   type<type<type>> alpha
3900  match = Search(r'>>[a-zA-Z_]', line)
3901  if match:
3902    error(filename, linenum, 'whitespace/operators', 3,
3903          'Missing spaces around >>')
3904
3905  # There shouldn't be space around unary operators
3906  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
3907  if match:
3908    error(filename, linenum, 'whitespace/operators', 4,
3909          'Extra space for operator %s' % match.group(1))
3910
3911
3912def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
3913  """Checks for horizontal spacing around parentheses.
3914
3915  Args:
3916    filename: The name of the current file.
3917    clean_lines: A CleansedLines instance containing the file.
3918    linenum: The number of the line to check.
3919    error: The function to call with any errors found.
3920  """
3921  line = clean_lines.elided[linenum]
3922
3923  # Flag a missing space before the ( in if/for/while/switch statements.
3924  match = Search(r' (if\(|for\(|while\(|switch\()', line)
3925  if match:
3926    error(filename, linenum, 'whitespace/parens', 5,
3927          'Missing space before ( in %s' % match.group(1))
3928
3929  # For if/for/while/switch, the left and right parens should be
3930  # consistent about how many spaces are inside the parens, and
3931  # there should either be zero or one spaces inside the parens.
3932  # We don't want: "if ( foo)" or "if ( foo   )".
3933  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
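  # In the match below, group(2) and group(4) capture the spaces just inside
  # the parentheses, and group(3) is the first character after those spaces.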
3934  match = Search(r'\b(if|for|while|switch)\s*'
3935                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
3936                 line)
3937  if match:
3938    if len(match.group(2)) != len(match.group(4)):
3939      if not (match.group(3) == ';' and
3940              len(match.group(2)) == 1 + len(match.group(4)) or
3941              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
3942        error(filename, linenum, 'whitespace/parens', 5,
3943              'Mismatching spaces inside () in %s' % match.group(1))
3944    if len(match.group(2)) not in [0, 1]:
3945      error(filename, linenum, 'whitespace/parens', 5,
3946            'Should have zero or one spaces inside ( and ) in %s' %
3947            match.group(1))
3948
3949
3950def CheckCommaSpacing(filename, clean_lines, linenum, error):
3951  """Checks for horizontal spacing near commas and semicolons.
3952
3953  Args:
3954    filename: The name of the current file.
3955    clean_lines: A CleansedLines instance containing the file.
3956    linenum: The number of the line to check.
3957    error: The function to call with any errors found.
3958  """
3959  raw = clean_lines.lines_without_raw_strings
3960  line = clean_lines.elided[linenum]
3961
3962  # You should always have a space after a comma (either as fn arg or operator)
3963  #
3964  # This does not apply when the non-space character following the
3965  # comma is another comma, since the only time when that happens is
3966  # for empty macro arguments.
3967  #
3968  # We run this check in two passes: first pass on elided lines to
3969  # detect missing whitespace, then a second pass on raw
3970  # lines to confirm that the missing whitespace is not due to
3971  # elided comments.
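  # The ReplaceAll below rewrites "operator,(" as "F(" so that a declaration
  # of operator,() is not reported as a missing space after a comma.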
3972  if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
3973      Search(r',[^,\s]', raw[linenum])):
3974    error(filename, linenum, 'whitespace/comma', 3,
3975          'Missing space after ,')
3976
3977  # You should always have a space after a semicolon
3978  # except for a few corner cases.
3979  # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
3980  # space after ;
3981  if Search(r';[^\s};\\)/]', line):
3982    error(filename, linenum, 'whitespace/semicolon', 3,
3983          'Missing space after ;')
3984
3985
3986def _IsType(clean_lines, nesting_state, expr):
3987  """Check if expression looks like a type name, returns true if so.
3988
3989  Args:
3990    clean_lines: A CleansedLines instance containing the file.
3991    nesting_state: A NestingState instance which maintains information about
3992                   the current stack of nested blocks being parsed.
3993    expr: The expression to check.
3994  Returns:
3995    True, if token looks like a type.
3996  """
3997  # Keep only the last token in the expression
3998  last_word = Match(r'^.*(\b\S+)$', expr)
3999  if last_word:
4000    token = last_word.group(1)
4001  else:
4002    token = expr
4003
4004  # Match native types and stdint types
4005  if _TYPES.match(token):
4006    return True
4007
4008  # Try a bit harder to match templated types.  Walk up the nesting
4009  # stack until we find something that resembles a typename
4010  # declaration for what we are looking for.
4011  typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
4012                      r'\b')
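  # e.g. for token "T", this pattern matches "typename T", "class T", or
  # "struct T" in an enclosing template parameter list.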
4013  block_index = len(nesting_state.stack) - 1
4014  while block_index >= 0:
4015    if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
4016      return False
4017
4018    # Found where the opening brace is.  We want to scan from this
4019    # line up to the beginning of the function, minus a few lines.
4020    #   template <typename Type1,  // stop scanning here
4021    #             ...>
4022    #   class C
4023    #     : public ... {  // start scanning here
4024    last_line = nesting_state.stack[block_index].starting_linenum
4025
4026    next_block_start = 0
4027    if block_index > 0:
4028      next_block_start = nesting_state.stack[block_index - 1].starting_linenum
4029    first_line = last_line
4030    while first_line >= next_block_start:
4031      if clean_lines.elided[first_line].find('template') >= 0:
4032        break
4033      first_line -= 1
4034    if first_line < next_block_start:
4035      # Didn't find any "template" keyword before reaching the next block,
4036      # so there are probably no template things to check for this block
4037      block_index -= 1
4038      continue
4039
4040    # Look for typename in the specified range
4041    for i in xrange(first_line, last_line + 1, 1):
4042      if Search(typename_pattern, clean_lines.elided[i]):
4043        return True
4044    block_index -= 1
4045
4046  return False
4047
4048
4049def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
4050  """Checks for horizontal spacing near braces.
4051
4052  Args:
4053    filename: The name of the current file.
4054    clean_lines: A CleansedLines instance containing the file.
4055    linenum: The number of the line to check.
4056    nesting_state: A NestingState instance which maintains information about
4057                   the current stack of nested blocks being parsed.
4058    error: The function to call with any errors found.
4059  """
4060  line = clean_lines.elided[linenum]
4061
4062  # Except after an opening paren, or after another opening brace (in case of
4063  # an initializer list, for instance), you should have spaces before your
4064  # braces when they are delimiting blocks, classes, namespaces etc.
4065  # And since you should never have braces at the beginning of a line,
4066  # this is an easy test.  Except that braces used for initialization don't
4067  # follow the same rule; we often don't want spaces before those.
4068  match = Match(r'^(.*[^ ({>]){', line)
4069
4070  if match:
4071    # Try a bit harder to check for brace initialization.  This
4072    # happens in one of the following forms:
4073    #   Constructor() : initializer_list_{} { ... }
4074    #   Constructor{}.MemberFunction()
4075    #   Type variable{};
4076    #   FunctionCall(type{}, ...);
4077    #   LastArgument(..., type{});
4078    #   LOG(INFO) << type{} << " ...";
4079    #   map_of_type[{...}] = ...;
4080    #   ternary = expr ? new type{} : nullptr;
4081    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
4082    #
4083    # We check for the character following the closing brace, and
4084    # silence the warning if it's one of those listed above, i.e.
4085    # "{.;,)<>]:".
4086    #
4087    # To account for nested initializer list, we allow any number of
4088    # closing braces up to "{;,)<".  We can't simply silence the
4089    # warning on first sight of closing brace, because that would
4090    # cause false negatives for things that are not initializer lists.
4091    #   Silence this:         But not this:
4092    #     Outer{                if (...) {
4093    #       Inner{...}            if (...){  // Missing space before {
4094    #     };                    }
4095    #
4096    # There is a false negative with this approach if people inserted
4097    # spurious semicolons, e.g. "if (cond){};", but we will catch the
4098    # spurious semicolon with a separate check.
4099    leading_text = match.group(1)
4100    (endline, endlinenum, endpos) = CloseExpression(
4101        clean_lines, linenum, len(match.group(1)))
4102    trailing_text = ''
4103    if endpos > -1:
4104      trailing_text = endline[endpos:]
4105    for offset in xrange(endlinenum + 1,
4106                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
4107      trailing_text += clean_lines.elided[offset]
4108    # We also suppress warnings for `uint64_t{expression}` etc., as the style
4109    # guide recommends brace initialization for integral types to avoid
4110    # overflow/truncation.
4111    if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
4112        and not _IsType(clean_lines, nesting_state, leading_text)):
4113      error(filename, linenum, 'whitespace/braces', 5,
4114            'Missing space before {')
4115
4116  # Make sure '} else {' has spaces.
4117  if Search(r'}else', line):
4118    error(filename, linenum, 'whitespace/braces', 5,
4119          'Missing space before else')
4120
4121  # You shouldn't have a space before a semicolon at the end of the line.
4122  # There's a special case for "for" since the style guide allows space before
4123  # the semicolon there.
4124  if Search(r':\s*;\s*$', line):
4125    error(filename, linenum, 'whitespace/semicolon', 5,
4126          'Semicolon defining empty statement. Use {} instead.')
4127  elif Search(r'^\s*;\s*$', line):
4128    error(filename, linenum, 'whitespace/semicolon', 5,
4129          'Line contains only semicolon. If this should be an empty statement, '
4130          'use {} instead.')
4131  elif (Search(r'\s+;\s*$', line) and
4132        not Search(r'\bfor\b', line)):
4133    error(filename, linenum, 'whitespace/semicolon', 5,
4134          'Extra space before last semicolon. If this should be an empty '
4135          'statement, use {} instead.')
4136
4137
4138def IsDecltype(clean_lines, linenum, column):
4139  """Check if the token ending on (linenum, column) is decltype().
4140
4141  Args:
4142    clean_lines: A CleansedLines instance containing the file.
4143    linenum: the number of the line to check.
4144    column: end column of the token to check.
4145  Returns:
4146    True if this token is a decltype() expression, False otherwise.
4147  """
4148  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
4149  if start_col < 0:
4150    return False
4151  if Search(r'\bdecltype\s*$', text[0:start_col]):
4152    return True
4153  return False
4154
4155def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
4156  """Checks for additional blank line issues related to sections.
4157
4158  Currently the only thing checked here is blank line before protected/private.
4159
4160  Args:
4161    filename: The name of the current file.
4162    clean_lines: A CleansedLines instance containing the file.
4163    class_info: A _ClassInfo object.
4164    linenum: The number of the line to check.
4165    error: The function to call with any errors found.
4166  """
4167  # Skip checks if the class is small, where small means 25 lines or less.
4168  # 25 lines seems like a good cutoff since that's the usual height of
4169  # terminals, and any class that can't fit in one screen can't really
4170  # be considered "small".
4171  #
4172  # Also skip checks if we are on the first line.  This accounts for
4173  # classes that look like
4174  #   class Foo { public: ... };
4175  #
4176  # If we didn't find the end of the class, last_line would be zero,
4177  # and the check will be skipped by the first condition.
4178  if (class_info.last_line - class_info.starting_linenum <= 24 or
4179      linenum <= class_info.starting_linenum):
4180    return
4181
4182  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
4183  if matched:
4184    # Issue warning if the line before public/protected/private was
4185    # not a blank line, but don't do this if the previous line contains
4186    # "class" or "struct".  This can happen two ways:
4187    #  - We are at the beginning of the class.
4188    #  - We are forward-declaring an inner class that is semantically
4189    #    private, but needed to be public for implementation reasons.
4190    # Also ignores cases where the previous line ends with a backslash, as is
4191    # common when defining classes in C macros.
4192    prev_line = clean_lines.lines[linenum - 1]
4193    if (not IsBlankLine(prev_line) and
4194        not Search(r'\b(class|struct)\b', prev_line) and
4195        not Search(r'\\$', prev_line)):
4196      # Try a bit harder to find the beginning of the class.  This is to
4197      # account for multi-line base-specifier lists, e.g.:
4198      #   class Derived
4199      #       : public Base {
4200      end_class_head = class_info.starting_linenum
4201      for i in range(class_info.starting_linenum, linenum):
4202        if Search(r'\{\s*$', clean_lines.lines[i]):
4203          end_class_head = i
4204          break
4205      if end_class_head < linenum - 1:
4206        error(filename, linenum, 'whitespace/blank_line', 3,
4207              '"%s:" should be preceded by a blank line' % matched.group(1))
4208
4209
4210def GetPreviousNonBlankLine(clean_lines, linenum):
4211  """Return the most recent non-blank line and its line number.
4212
4213  Args:
4214    clean_lines: A CleansedLines instance containing the file contents.
4215    linenum: The number of the line to check.
4216
4217  Returns:
4218    A tuple with two elements.  The first element is the contents of the last
4219    non-blank line before the current line, or the empty string if this is the
4220    first non-blank line.  The second is the line number of that line, or -1
4221    if this is the first non-blank line.
4222  """
4223
4224  prevlinenum = linenum - 1
4225  while prevlinenum >= 0:
4226    prevline = clean_lines.elided[prevlinenum]
4227    if not IsBlankLine(prevline):     # if not a blank line...
4228      return (prevline, prevlinenum)
4229    prevlinenum -= 1
4230  return ('', -1)
4231
4232
4233def CheckBraces(filename, clean_lines, linenum, error):
4234  """Looks for misplaced braces (e.g. at the end of line).
4235
4236  Args:
4237    filename: The name of the current file.
4238    clean_lines: A CleansedLines instance containing the file.
4239    linenum: The number of the line to check.
4240    error: The function to call with any errors found.
4241  """
4242
4243  line = clean_lines.elided[linenum]        # get rid of comments and strings
4244
4245  if Match(r'\s*{\s*$', line):
4246    # We allow an open brace to start a line in the case where someone is using
4247    # braces in a block to explicitly create a new scope, which is commonly used
4248    # to control the lifetime of stack-allocated variables.  Braces are also
4249    # used for brace initializers inside function calls.  We don't detect this
4250    # perfectly: we just don't complain if the last non-whitespace character on
4251    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
4252    # previous line starts a preprocessor block. We also allow a brace on the
4253    # following line if it is part of an array initialization and would not fit
4254    # within the 80 character limit of the preceding line.
4255    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
4256    if (not Search(r'[,;:}{(]\s*$', prevline) and
4257        not Match(r'\s*#', prevline) and
4258        not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
4259      error(filename, linenum, 'whitespace/braces', 4,
4260            '{ should almost always be at the end of the previous line')
4261
4262  # An else clause should be on the same line as the preceding closing brace.
4263  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
4264    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
4265    if Match(r'\s*}\s*$', prevline):
4266      error(filename, linenum, 'whitespace/newline', 4,
4267            'An else should appear on the same line as the preceding }')
4268
4269  # If braces come on one side of an else, they should be on both.
4270  # However, we have to worry about "else if" that spans multiple lines!
4271  if Search(r'else if\s*\(', line):       # could be multi-line if
4272    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
4273    # find the ( after the if
4274    pos = line.find('else if')
4275    pos = line.find('(', pos)
4276    if pos > 0:
4277      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
4278      brace_on_right = endline[endpos:].find('{') != -1
4279      if brace_on_left != brace_on_right:    # must be brace after if
4280        error(filename, linenum, 'readability/braces', 5,
4281              'If an else has a brace on one side, it should have it on both')
4282  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
4283    error(filename, linenum, 'readability/braces', 5,
4284          'If an else has a brace on one side, it should have it on both')
4285
4286  # Likewise, the body of an else should never be on the same line as the else
4287  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
4288    error(filename, linenum, 'whitespace/newline', 4,
4289          'Else clause should never be on same line as else (use 2 lines)')
4290
4291  # In the same way, a do/while should never be on one line
4292  if Match(r'\s*do [^\s{]', line):
4293    error(filename, linenum, 'whitespace/newline', 4,
4294          'do/while clauses should not be on a single line')
4295
4296  # Check single-line if/else bodies. The style guide says 'curly braces are not
4297  # required for single-line statements'. We additionally allow multi-line,
4298  # single statements, but we reject anything with more than one semicolon in
4299  # it. This means that the first semicolon after the if should be at the end of
4300  # its line, and the line after that should have an indent level equal to or
4301  # lower than the if. We also check for ambiguous if/else nesting without
4302  # braces.
4303  if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
4304  if if_else_match and not Match(r'\s*#', line):
4305    if_indent = GetIndentLevel(line)
4306    endline, endlinenum, endpos = line, linenum, if_else_match.end()
4307    if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
4308    if if_match:
4309      # This could be a multiline if condition, so find the end first.
4310      pos = if_match.end() - 1
4311      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
4312    # Check for an opening brace, either directly after the if or on the next
4313    # line. If found, this isn't a single-statement conditional.
4314    if (not Match(r'\s*{', endline[endpos:])
4315        and not (Match(r'\s*$', endline[endpos:])
4316                 and endlinenum < (len(clean_lines.elided) - 1)
4317                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
4318      while (endlinenum < len(clean_lines.elided)
4319             and ';' not in clean_lines.elided[endlinenum][endpos:]):
4320        endlinenum += 1
4321        endpos = 0
4322      if endlinenum < len(clean_lines.elided):
4323        endline = clean_lines.elided[endlinenum]
4324        # We allow a mix of whitespace and closing braces (e.g. for one-liner
4325        # methods) and a single \ after the semicolon (for macros)
4326        endpos = endline.find(';')
4327        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
4328          # Semicolon isn't the last character, there's something trailing.
4329          # Output a warning if the semicolon is not contained inside
4330          # a lambda expression.
4331          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
4332                       endline):
4333            error(filename, linenum, 'readability/braces', 4,
4334                  'If/else bodies with multiple statements require braces')
4335        elif endlinenum < len(clean_lines.elided) - 1:
4336          # Make sure the next line is dedented
4337          next_line = clean_lines.elided[endlinenum + 1]
4338          next_indent = GetIndentLevel(next_line)
4339          # With ambiguous nested if statements, this will error out on the
4340          # if that *doesn't* match the else, regardless of whether it's the
4341          # inner one or outer one.
4342          if (if_match and Match(r'\s*else\b', next_line)
4343              and next_indent != if_indent):
4344            error(filename, linenum, 'readability/braces', 4,
4345                  'Else clause should be indented at the same level as if. '
4346                  'Ambiguous nested if/else chains require braces.')
4347          elif next_indent > if_indent:
4348            error(filename, linenum, 'readability/braces', 4,
4349                  'If/else bodies with multiple statements require braces')
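  # Illustrative sketch (our own examples) of code flagged by the checks above:
  #   if (a)
  #     if (b)
  #       DoThis();
  #     else              // binds to the inner if; the outer if is reported
  #       DoThat();
  # and:
  #   if (a)
  #     DoThis();
  #     DoThat();         // second indented statement -> braces required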
4350
4351
4352def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
4353  """Looks for redundant trailing semicolon.
4354
4355  Args:
4356    filename: The name of the current file.
4357    clean_lines: A CleansedLines instance containing the file.
4358    linenum: The number of the line to check.
4359    error: The function to call with any errors found.
4360  """
4361
4362  line = clean_lines.elided[linenum]
4363
4364  # Block bodies should not be followed by a semicolon.  Due to C++11
4365  # brace initialization, there are more places where semicolons are
4366  # required than not, so we explicitly list the allowed rules rather
4367  # than listing the disallowed ones.  These are the places where "};"
4368  # should be replaced by just "}":
4369  # 1. Some flavor of block following closing parenthesis:
4370  #    for (;;) {};
4371  #    while (...) {};
4372  #    switch (...) {};
4373  #    Function(...) {};
4374  #    if (...) {};
4375  #    if (...) else if (...) {};
4376  #
4377  # 2. else block:
4378  #    if (...) else {};
4379  #
4380  # 3. const member function:
4381  #    Function(...) const {};
4382  #
4383  # 4. Block following some statement:
4384  #    x = 42;
4385  #    {};
4386  #
4387  # 5. Block at the beginning of a function:
4388  #    Function(...) {
4389  #      {};
4390  #    }
4391  #
4392  #    Note that naively checking for the preceding "{" will also match
4393  #    braces inside multi-dimensional arrays, but this is fine since
4394  #    that expression will not contain semicolons.
4395  #
4396  # 6. Block following another block:
4397  #    while (true) {}
4398  #    {};
4399  #
4400  # 7. End of namespaces:
4401  #    namespace {};
4402  #
4403    #    These semicolons seem far more common than other kinds of
4404  #    redundant semicolons, possibly due to people converting classes
4405  #    to namespaces.  For now we do not warn for this case.
4406  #
4407  # Try matching case 1 first.
4408  match = Match(r'^(.*\)\s*)\{', line)
4409  if match:
4410    # Matched closing parenthesis (case 1).  Check the token before the
4411    # matching opening parenthesis, and don't warn if it looks like a
4412    # macro.  This avoids these false positives:
4413    #  - macro that defines a base class
4414    #  - multi-line macro that defines a base class
4415    #  - macro that defines the whole class-head
4416    #
4417    # But we still issue warnings for macros that we know are safe to
4418    # warn, specifically:
4419    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
4420    #  - TYPED_TEST
4421    #  - INTERFACE_DEF
4422    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
4423    #
4424    # We implement a list of safe macros instead of a list of
4425    # unsafe macros, even though the latter appears less frequently in
4426    # google code and would have been easier to implement.  This is because
4427    # getting the allowed list wrong merely leaves a few extra semicolons
4428    # unflagged, while getting a disallowed list wrong would suggest removing
4429    # semicolons that are actually required, causing compile errors.
4430    #
4431    # In addition to macros, we also don't want to warn on
4432    #  - Compound literals
4433    #  - Lambdas
4434    #  - alignas specifier with anonymous structs
4435    #  - decltype
4436    closing_brace_pos = match.group(1).rfind(')')
4437    opening_parenthesis = ReverseCloseExpression(
4438        clean_lines, linenum, closing_brace_pos)
4439    if opening_parenthesis[2] > -1:
4440      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
4441      macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
4442      func = Match(r'^(.*\])\s*$', line_prefix)
4443      if ((macro and
4444           macro.group(1) not in (
4445               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
4446               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
4447               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
4448          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
4449          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
4450          Search(r'\bdecltype$', line_prefix) or
4451          Search(r'\s+=\s*$', line_prefix)):
4452        match = None
4453    if (match and
4454        opening_parenthesis[1] > 1 and
4455        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
4456      # Multi-line lambda-expression
4457      match = None
4458
4459  else:
4460    # Try matching cases 2-3.
4461    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
4462    if not match:
4463      # Try matching cases 4-6.  These are always matched on separate lines.
4464      #
4465      # Note that we can't simply concatenate the previous line to the
4466      # current line and do a single match, otherwise we may output
4467      # duplicate warnings for the blank line case:
4468      #   if (cond) {
4469      #     // blank line
4470      #   }
4471      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
4472      if prevline and Search(r'[;{}]\s*$', prevline):
4473        match = Match(r'^(\s*)\{', line)
4474
4475  # Check matching closing brace
4476  if match:
4477    (endline, endlinenum, endpos) = CloseExpression(
4478        clean_lines, linenum, len(match.group(1)))
4479    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
4480      # Current {} pair is eligible for semicolon check, and we have found
4481      # the redundant semicolon, output warning here.
4482      #
4483      # Note: because we are scanning forward for opening braces, and
4484      # outputting warnings for the matching closing brace, if there are
4485      # nested blocks with trailing semicolons, we will get the error
4486      # messages in reversed order.
4487
4488      # We need to check the line forward for NOLINT
4489      raw_lines = clean_lines.raw_lines
4490      ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
4491                              error)
4492      ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
4493                              error)
4494
4495      error(filename, endlinenum, 'readability/braces', 4,
4496            "You don't need a ; after a }")
4497
4498
4499def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
4500  """Look for empty loop/conditional body with only a single semicolon.
4501
4502  Args:
4503    filename: The name of the current file.
4504    clean_lines: A CleansedLines instance containing the file.
4505    linenum: The number of the line to check.
4506    error: The function to call with any errors found.
4507  """
4508
4509  # Search for loop keywords at the beginning of the line.  Because only
4510  # whitespace is allowed before the keywords, this will also ignore most
4511  # do-while-loops, since those lines should start with a closing brace.
4512  #
4513  # We also check "if" blocks here, since an empty conditional block
4514  # is likely an error.
4515  line = clean_lines.elided[linenum]
4516  matched = Match(r'\s*(for|while|if)\s*\(', line)
4517  if matched:
4518    # Find the end of the conditional expression.
4519    (end_line, end_linenum, end_pos) = CloseExpression(
4520        clean_lines, linenum, line.find('('))
4521
4522    # Output warning if what follows the condition expression is a semicolon.
4523    # No warning for all other cases, including whitespace or newline, since we
4524    # have a separate check for semicolons preceded by whitespace.
4525    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
4526      if matched.group(1) == 'if':
4527        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
4528              'Empty conditional bodies should use {}')
4529      else:
4530        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
4531              'Empty loop bodies should use {} or continue')
4532
4533    # Check for if statements that have completely empty bodies (no comments)
4534    # and no else clauses.
4535    if end_pos >= 0 and matched.group(1) == 'if':
4536      # Find the position of the opening { for the if statement.
4537      # Return without logging an error if it has no brackets.
4538      opening_linenum = end_linenum
4539      opening_line_fragment = end_line[end_pos:]
4540      # Loop until EOF or until we find something other than whitespace or an opening {.
4541      while not Search(r'^\s*\{', opening_line_fragment):
4542        if Search(r'^(?!\s*$)', opening_line_fragment):
4543          # Conditional has no brackets.
4544          return
4545        opening_linenum += 1
4546        if opening_linenum == len(clean_lines.elided):
4547          # Couldn't find conditional's opening { or any code before EOF.
4548          return
4549        opening_line_fragment = clean_lines.elided[opening_linenum]
4550      # Set opening_line (opening_line_fragment may not be entire opening line).
4551      opening_line = clean_lines.elided[opening_linenum]
4552
4553      # Find the position of the closing }.
4554      opening_pos = opening_line_fragment.find('{')
4555      if opening_linenum == end_linenum:
4556        # We need to make opening_pos relative to the start of the entire line.
4557        opening_pos += end_pos
4558      (closing_line, closing_linenum, closing_pos) = CloseExpression(
4559          clean_lines, opening_linenum, opening_pos)
4560      if closing_pos < 0:
4561        return
4562
4563      # Now construct the body of the conditional. This consists of the portion
4564      # of the opening line after the {, all lines until the closing line,
4565      # and the portion of the closing line before the }.
4566      if (clean_lines.raw_lines[opening_linenum] !=
4567          CleanseComments(clean_lines.raw_lines[opening_linenum])):
4568        # Opening line ends with a comment, so conditional isn't empty.
4569        return
4570      if closing_linenum > opening_linenum:
4571        # Opening line after the {. Ignore comments here since we checked above.
4572        bodylist = list(opening_line[opening_pos+1:])
4573        # All lines until closing line, excluding closing line, with comments.
4574        bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
4575        # Closing line before the }. Won't (and can't) have comments.
4576        bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
4577        body = '\n'.join(bodylist)
4578      else:
4579        # If statement has brackets and fits on a single line.
4580        body = opening_line[opening_pos+1:closing_pos-1]
4581
4582      # Check if the body is empty
4583      if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
4584        return
4585      # The body is empty. Now make sure there's not an else clause.
4586      current_linenum = closing_linenum
4587      current_line_fragment = closing_line[closing_pos:]
4588      # Loop until EOF or until we find something other than whitespace or an else clause.
4589      while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
4590        if Search(r'^(?=\s*else)', current_line_fragment):
4591          # Found an else clause, so don't log an error.
4592          return
4593        current_linenum += 1
4594        if current_linenum == len(clean_lines.elided):
4595          break
4596        current_line_fragment = clean_lines.elided[current_linenum]
4597
4598      # The body is empty and there's no else clause until EOF or other code.
4599      error(filename, end_linenum, 'whitespace/empty_if_body', 4,
4600            ('If statement had no body and no else clause'))
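    # Illustrative sketch (our own examples) of what the checks above report:
    #   if (ptr != nullptr);     <- empty conditional body, should use {}
    #   while (Poll());          <- empty loop body, should use {} or continue
    #   if (Initialize()) {
    #   }                        <- empty if body with no else clause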
4601
4602
4603def FindCheckMacro(line):
4604  """Find a replaceable CHECK-like macro.
4605
4606  Args:
4607    line: line to search on.
4608  Returns:
4609    (macro name, start position), or (None, -1) if no replaceable
4610    macro is found.
4611  """
4612  for macro in _CHECK_MACROS:
4613    i = line.find(macro)
4614    if i >= 0:
4615      # Find opening parenthesis.  Do a regular expression match here
4616      # to make sure that we are matching the expected CHECK macro, as
4617      # opposed to some other macro that happens to contain the CHECK
4618      # substring.
4619      matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
4620      if not matched:
4621        continue
4622      return (macro, len(matched.group(1)))
4623  return (None, -1)
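  # Rough illustration (our own examples, assuming 'CHECK' appears in
  # _CHECK_MACROS defined earlier in this file):
  #   FindCheckMacro('  CHECK(x);')  -> ('CHECK', 7)   # 7 is the index of '('
  #   FindCheckMacro('MY_CHECK(x);') -> (None, -1)     # \b rejects MY_CHECK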
4624
4625
4626def CheckCheck(filename, clean_lines, linenum, error):
4627  """Checks the use of CHECK and EXPECT macros.
4628
4629  Args:
4630    filename: The name of the current file.
4631    clean_lines: A CleansedLines instance containing the file.
4632    linenum: The number of the line to check.
4633    error: The function to call with any errors found.
4634  """
4635
4636  # Decide the set of replacement macros that should be suggested
4637  lines = clean_lines.elided
4638  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
4639  if not check_macro:
4640    return
4641
4642  # Find end of the boolean expression by matching parentheses
4643  (last_line, end_line, end_pos) = CloseExpression(
4644      clean_lines, linenum, start_pos)
4645  if end_pos < 0:
4646    return
4647
4648  # If the check macro is followed by something other than a
4649  # semicolon, assume users will log their own custom error messages
4650  # and don't suggest any replacements.
4651  if not Match(r'\s*;', last_line[end_pos:]):
4652    return
4653
4654  if linenum == end_line:
4655    expression = lines[linenum][start_pos + 1:end_pos - 1]
4656  else:
4657    expression = lines[linenum][start_pos + 1:]
4658    for i in xrange(linenum + 1, end_line):
4659      expression += lines[i]
4660    expression += last_line[0:end_pos - 1]
4661
4662  # Parse expression so that we can take parentheses into account.
4663  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
4664  # which is not replaceable by CHECK_LE.
4665  lhs = ''
4666  rhs = ''
4667  operator = None
4668  while expression:
4669    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
4670                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
4671    if matched:
4672      token = matched.group(1)
4673      if token == '(':
4674        # Parenthesized operand
4675        expression = matched.group(2)
4676        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
4677        if end < 0:
4678          return  # Unmatched parenthesis
4679        lhs += '(' + expression[0:end]
4680        expression = expression[end:]
4681      elif token in ('&&', '||'):
4682        # Logical and/or operators.  This means the expression
4683        # contains more than one term, for example:
4684        #   CHECK(42 < a && a < b);
4685        #
4686        # These are not replaceable with CHECK_LE, so bail out early.
4687        return
4688      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
4689        # Non-relational operator
4690        lhs += token
4691        expression = matched.group(2)
4692      else:
4693        # Relational operator
4694        operator = token
4695        rhs = matched.group(2)
4696        break
4697    else:
4698      # Unparenthesized operand.  Instead of appending to lhs one character
4699      # at a time, we do another regular expression match to consume several
4700      # characters at once if possible.  Trivial benchmark shows that this
4701      # is more efficient when the operands are longer than a single
4702      # character, which is generally the case.
4703      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
4704      if not matched:
4705        matched = Match(r'^(\s*\S)(.*)$', expression)
4706        if not matched:
4707          break
4708      lhs += matched.group(1)
4709      expression = matched.group(2)
4710
4711  # Only apply checks if we got all parts of the boolean expression
4712  if not (lhs and operator and rhs):
4713    return
4714
4715  # Check that rhs does not contain logical operators.  We already know
4716  # that lhs is fine since the loop above parses out && and ||.
4717  if rhs.find('&&') > -1 or rhs.find('||') > -1:
4718    return
4719
4720  # At least one of the operands must be a constant literal.  This is
4721  # to avoid suggesting replacements for unprintable things like
4722  # CHECK(variable != iterator)
4723  #
4724  # The following pattern matches decimal, hex integers, strings, and
4725  # characters (in that order).
4726  lhs = lhs.strip()
4727  rhs = rhs.strip()
4728  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
4729  if Match(match_constant, lhs) or Match(match_constant, rhs):
4730    # Note: since we know both lhs and rhs, we can provide a more
4731    # descriptive error message like:
4732    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
4733    # Instead of:
4734    #   Consider using CHECK_EQ instead of CHECK(a == b)
4735    #
4736    # We are still keeping the less descriptive message because if lhs
4737    # or rhs gets long, the error message might become unreadable.
4738    error(filename, linenum, 'readability/check', 2,
4739          'Consider using %s instead of %s(a %s b)' % (
4740              _CHECK_REPLACEMENT[check_macro][operator],
4741              check_macro, operator))
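  # Rough illustration (our own examples) of the suggestions produced above:
  #   CHECK(x == 42);       -> 'Consider using CHECK_EQ instead of CHECK(a == b)'
  #   CHECK(x == y);        -> no suggestion (neither operand is a constant)
  #   CHECK((a < 4) == b);  -> no suggestion (parenthesized operand, see above)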
4742
4743
4744def CheckAltTokens(filename, clean_lines, linenum, error):
4745  """Check alternative keywords being used in boolean expressions.
4746
4747  Args:
4748    filename: The name of the current file.
4749    clean_lines: A CleansedLines instance containing the file.
4750    linenum: The number of the line to check.
4751    error: The function to call with any errors found.
4752  """
4753  line = clean_lines.elided[linenum]
4754
4755  # Avoid preprocessor lines
4756  if Match(r'^\s*#', line):
4757    return
4758
4759  # Last ditch effort to avoid multi-line comments.  This will not help
4760  # if the comment started before the current line or ended after the
4761  # current line, but it catches most of the false positives.  At least,
4762  # it provides a way to work around this warning for people who use
4763  # multi-line comments in preprocessor macros.
4764  #
4765  # TODO(unknown): remove this once cpplint has better support for
4766  # multi-line comments.
4767  if line.find('/*') >= 0 or line.find('*/') >= 0:
4768    return
4769
4770  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
4771    error(filename, linenum, 'readability/alt_tokens', 2,
4772          'Use operator %s instead of %s' % (
4773              _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
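  # Rough illustration (our own example, assuming _ALT_TOKEN_REPLACEMENT defined
  # earlier maps 'and' -> '&&' and 'not' -> '!'):
  #   if (a and not b) {    -> 'Use operator && instead of and'
  #                            'Use operator ! instead of not'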
4774
4775
4776def GetLineWidth(line):
4777  """Determines the width of the line in column positions.
4778
4779  Args:
4780    line: A string, which may be a Unicode string.
4781
4782  Returns:
4783    The width of the line in column positions, accounting for Unicode
4784    combining characters and wide characters.
4785  """
4786  if isinstance(line, unicode):
4787    width = 0
4788    for uc in unicodedata.normalize('NFC', line):
4789      if unicodedata.east_asian_width(uc) in ('W', 'F'):
4790        width += 2
4791      elif not unicodedata.combining(uc):
4792        # Issue 337
4793        # https://mail.python.org/pipermail/python-list/2012-August/628809.html
4794        if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
4795          # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
4796          is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
4797          # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
4798          is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
4799          if not is_wide_build and is_low_surrogate:
4800            width -= 1
4801
4802        width += 1
4803    return width
4804  else:
4805    return len(line)
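  # Rough illustration (our own examples; the unicode branch assumes the
  # Python 2/3 compatibility aliases set up earlier in this file):
  #   GetLineWidth('abc')           == 3
  #   GetLineWidth(u'\u4e00\u4e01') == 4   # CJK ideographs are 'W' (wide)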
4806
4807
4808def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
4809               error):
4810  """Checks rules from the 'C++ style rules' section of cppguide.html.
4811
4812  Most of these rules are hard to test (naming, comment style), but we
4813  do what we can.  In particular we check for 2-space indents, line lengths,
4814  tab usage, spaces inside code, etc.
4815
4816  Args:
4817    filename: The name of the current file.
4818    clean_lines: A CleansedLines instance containing the file.
4819    linenum: The number of the line to check.
4820    file_extension: The extension (without the dot) of the filename.
4821    nesting_state: A NestingState instance which maintains information about
4822                   the current stack of nested blocks being parsed.
4823    error: The function to call with any errors found.
4824  """
4825
4826  # Don't use "elided" lines here, otherwise we can't check commented lines.
4827  # Don't want to use "raw" either, because we don't want to check inside C++11
4828  # raw strings.
4829  raw_lines = clean_lines.lines_without_raw_strings
4830  line = raw_lines[linenum]
4831  prev = raw_lines[linenum - 1] if linenum > 0 else ''
4832
4833  if line.find('\t') != -1:
4834    error(filename, linenum, 'whitespace/tab', 1,
4835          'Tab found; better to use spaces')
4836
4837  # One or three blank spaces at the beginning of a line are weird; they are
4838  # hard to reconcile with 2-space indents.
4839  # NOTE: here are the conditions Rob Pike used for his tests.  Mine aren't
4840  # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
4841  # if(RLENGTH > 20) complain = 0;
4842  # if(match($0, " +(error|private|public|protected):")) complain = 0;
4843  # if(match(prev, "&& *$")) complain = 0;
4844  # if(match(prev, "\\|\\| *$")) complain = 0;
4845  # if(match(prev, "[\",=><] *$")) complain = 0;
4846  # if(match($0, " <<")) complain = 0;
4847  # if(match(prev, " +for \\(")) complain = 0;
4848  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
4849  scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
4850  classinfo = nesting_state.InnermostClass()
4851  initial_spaces = 0
4852  cleansed_line = clean_lines.elided[linenum]
4853  while initial_spaces < len(line) and line[initial_spaces] == ' ':
4854    initial_spaces += 1
4855  # There are certain situations we allow one space, notably for
4856  # section labels, and also lines containing multi-line raw strings.
4857  # We also don't check for lines that look like continuation lines
4858  # (of lines ending in double quotes, commas, equals, or angle brackets)
4859  # because the rules for how to indent those are non-trivial.
4860  if (not Search(r'[",=><] *$', prev) and
4861      (initial_spaces == 1 or initial_spaces == 3) and
4862      not Match(scope_or_label_pattern, cleansed_line) and
4863      not (clean_lines.raw_lines[linenum] != line and
4864           Match(r'^\s*""', line))):
4865    error(filename, linenum, 'whitespace/indent', 3,
4866          'Weird number of spaces at line-start.  '
4867          'Are you using a 2-space indent?')
4868
4869  if line and line[-1].isspace():
4870    error(filename, linenum, 'whitespace/end_of_line', 4,
4871          'Line ends in whitespace.  Consider deleting these extra spaces.')
4872
4873  # Check if the line is a header guard.
4874  is_header_guard = False
4875  if IsHeaderExtension(file_extension):
4876    cppvar = GetHeaderGuardCPPVariable(filename)
4877    if (line.startswith('#ifndef %s' % cppvar) or
4878        line.startswith('#define %s' % cppvar) or
4879        line.startswith('#endif  // %s' % cppvar)):
4880      is_header_guard = True
4881  # #include lines and header guards can be long, since there's no clean way to
4882  # split them.
4883  #
4884  # URLs can be long too.  It's possible to split these, but it makes them
4885  # harder to cut&paste.
4886  #
4887  # The "$Id:...$" comment may also get very long without it being the
4888  # developer's fault.
4889  #
4890  # Doxygen documentation copying can get pretty long when using an overloaded
4891  # function declaration
4892  if (not line.startswith('#include') and not is_header_guard and
4893      not Match(r'^\s*//.*http(s?)://\S*$', line) and
4894      not Match(r'^\s*//\s*[^\s]*$', line) and
4895      not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
4896      not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
4897    line_width = GetLineWidth(line)
4898    if line_width > _line_length:
4899      error(filename, linenum, 'whitespace/line_length', 2,
4900            'Lines should be <= %i characters long' % _line_length)
4901
4902  if (cleansed_line.count(';') > 1 and
4903      # allow simple single line lambdas
4904      not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
4905                line) and
4906      # for loops are allowed two ;'s (and may run over two lines).
4907      cleansed_line.find('for') == -1 and
4908      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
4909       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
4910      # It's ok to have many commands in a switch case that fits in 1 line
4911      not ((cleansed_line.find('case ') != -1 or
4912            cleansed_line.find('default:') != -1) and
4913           cleansed_line.find('break;') != -1)):
4914    error(filename, linenum, 'whitespace/newline', 0,
4915          'More than one command on the same line')
4916
4917  # Some more style checks
4918  CheckBraces(filename, clean_lines, linenum, error)
4919  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
4920  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
4921  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
4922  CheckOperatorSpacing(filename, clean_lines, linenum, error)
4923  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
4924  CheckCommaSpacing(filename, clean_lines, linenum, error)
4925  CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
4926  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
4927  CheckCheck(filename, clean_lines, linenum, error)
4928  CheckAltTokens(filename, clean_lines, linenum, error)
4929  classinfo = nesting_state.InnermostClass()
4930  if classinfo:
4931    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
4932
4933
4934_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
4935# Matches the first component of a filename delimited by -s and _s. That is:
4936#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
4937#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
4938#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
4939#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
4940_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
4941
4942
4943def _DropCommonSuffixes(filename):
4944  """Drops common suffixes like _test.cc or -inl.h from filename.
4945
4946  For example:
4947    >>> _DropCommonSuffixes('foo/foo-inl.h')
4948    'foo/foo'
4949    >>> _DropCommonSuffixes('foo/bar/foo.cc')
4950    'foo/bar/foo'
4951    >>> _DropCommonSuffixes('foo/foo_internal.h')
4952    'foo/foo'
4953    >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
4954    'foo/foo_unusualinternal'
4955
4956  Args:
4957    filename: The input filename.
4958
4959  Returns:
4960    The filename with the common suffix removed.
4961  """
4962  for suffix in itertools.chain(
4963      ('%s.%s' % (test_suffix.lstrip('_'), ext)
4964       for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
4965      ('%s.%s' % (suffix, ext)
4966       for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
4967    if (filename.endswith(suffix) and len(filename) > len(suffix) and
4968        filename[-len(suffix) - 1] in ('-', '_')):
4969      return filename[:-len(suffix) - 1]
4970  return os.path.splitext(filename)[0]
4971
4972
4973def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
4974  """Figures out what kind of header 'include' is.
4975
4976  Args:
4977    fileinfo: The current file cpplint is running over. A FileInfo instance.
4978    include: The path to a #included file.
4979    used_angle_brackets: True if the #include used <> rather than "".
4980    include_order: "default" or other value allowed in program arguments
4981
4982  Returns:
4983    One of the _XXX_HEADER constants.
4984
4985  For example:
4986    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
4987    _C_SYS_HEADER
4988    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
4989    _CPP_SYS_HEADER
4990    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
4991    _OTHER_SYS_HEADER
4992    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
4993    _LIKELY_MY_HEADER
4994    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
4995    ...                  'bar/foo_other_ext.h', False)
4996    _POSSIBLE_MY_HEADER
4997    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
4998    _OTHER_HEADER
4999  """
5000  # This is a list of all standard c++ header files, except
5001  # those already checked for above.
5002  is_cpp_header = include in _CPP_HEADERS
5003
5004  # Mark include as C header if in list or in a known folder for standard-ish C headers.
5005  is_std_c_header = (include_order == "default") or (include in _C_HEADERS
5006            # additional linux glibc header folders
5007            or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))
5008
5009  # Headers with C++ extensions shouldn't be considered C system headers
5010  is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']
5011
5012  if is_system:
5013    if is_cpp_header:
5014      return _CPP_SYS_HEADER
5015    if is_std_c_header:
5016      return _C_SYS_HEADER
5017    else:
5018      return _OTHER_SYS_HEADER
5019
5020  # If the target file and the include we're checking share a
5021  # basename when we drop common extensions, and the include
5022  # lives in . , then it's likely to be owned by the target file.
5023  target_dir, target_base = (
5024      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
5025  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
5026  target_dir_pub = os.path.normpath(target_dir + '/../public')
5027  target_dir_pub = target_dir_pub.replace('\\', '/')
5028  if target_base == include_base and (
5029      include_dir == target_dir or
5030      include_dir == target_dir_pub):
5031    return _LIKELY_MY_HEADER
5032
5033  # If the target and include share some initial basename
5034  # component, it's possible the target is implementing the
5035  # include, so it's allowed to be first, but we'll never
5036  # complain if it's not there.
5037  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
5038  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
5039  if (target_first_component and include_first_component and
5040      target_first_component.group(0) ==
5041      include_first_component.group(0)):
5042    return _POSSIBLE_MY_HEADER
5043
5044  return _OTHER_HEADER
5045
5046
5048def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
5049  """Check rules that are applicable to #include lines.
5050
5051  Strings on #include lines are NOT removed from elided line, to make
5052  certain tasks easier. However, to prevent false positives, checks
5053  applicable to #include lines in CheckLanguage must be put here.
5054
5055  Args:
5056    filename: The name of the current file.
5057    clean_lines: A CleansedLines instance containing the file.
5058    linenum: The number of the line to check.
5059    include_state: An _IncludeState instance in which the headers are inserted.
5060    error: The function to call with any errors found.
5061  """
5062  fileinfo = FileInfo(filename)
5063  line = clean_lines.lines[linenum]
5064
5065  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
5066  # Only do this check if the included header follows google naming
5067  # conventions.  If not, assume that it's a 3rd party API that
5068  # requires special include conventions.
5069  #
5070  # We also make an exception for Lua headers, which follow google
5071  # naming convention but not the include convention.
5072  match = Match(r'#include\s*"([^/]+\.h)"', line)
5073  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
5074    error(filename, linenum, 'build/include_subdir', 4,
5075          'Include the directory when naming .h files')
5076
5077  # We shouldn't include a file more than once.  Actually, there are a
5078  # handful of instances where doing so is okay, but in general it's
5079  # not.
5080  match = _RE_PATTERN_INCLUDE.search(line)
5081  if match:
5082    include = match.group(2)
5083    used_angle_brackets = (match.group(1) == '<')
5084    duplicate_line = include_state.FindHeader(include)
5085    if duplicate_line >= 0:
5086      error(filename, linenum, 'build/include', 4,
5087            '"%s" already included at %s:%s' %
5088            (include, filename, duplicate_line))
5089      return
5090
5091    for extension in GetNonHeaderExtensions():
5092      if (include.endswith('.' + extension) and
5093          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
5094        error(filename, linenum, 'build/include', 4,
5095              'Do not include .' + extension + ' files from other packages')
5096        return
5097
5098    # We DO want to include a 3rd party looking header if it matches the
5099    # filename. Otherwise we get an erroneous "...should include its
5100    # header" error later.
5101    third_src_header = False
5102    for ext in GetHeaderExtensions():
5103      basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
5104      headerfile = basefilename + '.' + ext
5105      headername = FileInfo(headerfile).RepositoryName()
5106      if headername in include or include in headername:
5107        third_src_header = True
5108        break
5109
5110    if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
5111      include_state.include_list[-1].append((include, linenum))
5112
5113      # We want to ensure that headers appear in the right order:
5114      # 1) for foo.cc, foo.h  (preferred location)
5115      # 2) c system files
5116      # 3) cpp system files
5117      # 4) for foo.cc, foo.h  (deprecated location)
5118      # 5) other google headers
5119      #
5120      # We classify each include statement as one of those 5 types
5121      # using a number of techniques. The include_state object keeps
5122      # track of the highest type seen, and complains if we see a
5123      # lower type after that.
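      # Rough illustration (our own example) of the preferred ordering in foo.cc:
      #   #include "foo/foo.h"      // 1) preferred location
      #   #include <sys/types.h>    // 2) c system files
      #   #include <vector>         // 3) cpp system files
      #   #include "base/other.h"   // 5) other google headers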
5124      error_message = include_state.CheckNextIncludeOrder(
5125          _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
5126      if error_message:
5127        error(filename, linenum, 'build/include_order', 4,
5128              '%s. Should be: %s.h, c system, c++ system, other.' %
5129              (error_message, fileinfo.BaseName()))
5130      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
5131      if not include_state.IsInAlphabeticalOrder(
5132          clean_lines, linenum, canonical_include):
5133        error(filename, linenum, 'build/include_alpha', 4,
5134              'Include "%s" not in alphabetical order' % include)
5135      include_state.SetLastHeader(canonical_include)
5136
5137
5139def _GetTextInside(text, start_pattern):
5140  r"""Retrieves all the text between matching open and close parentheses.
5141
5142  Given a string of lines and a regular expression string, retrieve all the text
5143  following the expression and between opening punctuation symbols like
5144  (, [, or {, and the matching close-punctuation symbol. This properly handles
5145  nested occurrences of the punctuation symbols, so for text like
5146    printf(a(), b(c()));
5147  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
5148  start_pattern must match a string that ends with an opening punctuation symbol.
5149
5150  Args:
5151    text: The lines to extract text from.  Comments and strings must be
5152          elided.  It may be a single line or span multiple lines.
5153    start_pattern: The regexp string indicating where to start extracting
5154                   the text.
5155  Returns:
5156    The extracted text.
5157    None if either the opening string or ending punctuation could not be found.
5158  """
5159  # TODO(unknown): Audit cpplint.py to see what places could be profitably
5160  # rewritten to use _GetTextInside (and use inferior regexp matching today).
5161
5162  # Map each opening punctuation symbol to its matching closing symbol.
5163  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
5164  closing_punctuation = set(itervalues(matching_punctuation))
5165
5166  # Find the position to start extracting text.
5167  match = re.search(start_pattern, text, re.M)
5168  if not match:  # start_pattern not found in text.
5169    return None
5170  start_position = match.end(0)
5171
5172  assert start_position > 0, (
5173      'start_pattern must end with an opening punctuation.')
5174  assert text[start_position - 1] in matching_punctuation, (
5175      'start_pattern must end with an opening punctuation.')
5176  # Stack of closing punctuations we expect to have in text after position.
5177  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
5178  position = start_position
5179  while punctuation_stack and position < len(text):
5180    if text[position] == punctuation_stack[-1]:
5181      punctuation_stack.pop()
5182    elif text[position] in closing_punctuation:
5183      # A closing punctuation without matching opening punctuations.
5184      return None
5185    elif text[position] in matching_punctuation:
5186      punctuation_stack.append(matching_punctuation[text[position]])
5187    position += 1
5188  if punctuation_stack:
5189    # Opening punctuations left without matching close-punctuations.
5190    return None
5191  # punctuations match.
5192  return text[start_position:position - 1]
5193
5194
5195# Patterns for matching call-by-reference parameters.
5196#
5197# Supports nested templates up to 2 levels deep using this messy pattern:
5198#   < (?: < (?: < [^<>]*
5199#               >
5200#           |   [^<>] )*
5201#         >
5202#     |   [^<>] )*
5203#   >
5204_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
5205_RE_PATTERN_TYPE = (
5206    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
5207    r'(?:\w|'
5208    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
5209    r'::)+')
5210# A call-by-reference parameter ends with '& identifier'.
5211_RE_PATTERN_REF_PARAM = re.compile(
5212    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
5213    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
5214# A call-by-const-reference parameter either ends with 'const& identifier'
5215# or looks like 'const type& identifier' when 'type' is atomic.
5216_RE_PATTERN_CONST_REF_PARAM = (
5217    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
5218    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
5219# Stream types.
5220_RE_PATTERN_REF_STREAM_PARAM = (
5221    r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
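# Rough illustration (our own example): in a declaration like
#   void Swap(string &a, const string &b);
# _RE_PATTERN_REF_PARAM is meant to capture reference parameters such as
# 'string &a', while spans that also satisfy _RE_PATTERN_CONST_REF_PARAM
# (e.g. 'const string &b') are presumably treated as const references by the
# non-const-reference check further below.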
5222
5223
5224def CheckLanguage(filename, clean_lines, linenum, file_extension,
5225                  include_state, nesting_state, error):
5226  """Checks rules from the 'C++ language rules' section of cppguide.html.
5227
5228  Some of these rules are hard to test (function overloading, using
5229  uint32 inappropriately), but we do the best we can.
5230
5231  Args:
5232    filename: The name of the current file.
5233    clean_lines: A CleansedLines instance containing the file.
5234    linenum: The number of the line to check.
5235    file_extension: The extension (without the dot) of the filename.
5236    include_state: An _IncludeState instance in which the headers are inserted.
5237    nesting_state: A NestingState instance which maintains information about
5238                   the current stack of nested blocks being parsed.
5239    error: The function to call with any errors found.
5240  """
5241  # If the line is empty or consists of entirely a comment, no need to
5242  # check it.
5243  line = clean_lines.elided[linenum]
5244  if not line:
5245    return
5246
5247  match = _RE_PATTERN_INCLUDE.search(line)
5248  if match:
5249    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
5250    return
5251
5252  # Reset include state across preprocessor directives.  This is meant
5253  # to silence warnings for conditional includes.
5254  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
5255  if match:
5256    include_state.ResetSection(match.group(1))
5257
5258
5259  # Perform other checks now that we are sure that this is not an include line
5260  CheckCasts(filename, clean_lines, linenum, error)
5261  CheckGlobalStatic(filename, clean_lines, linenum, error)
5262  CheckPrintf(filename, clean_lines, linenum, error)
5263
5264  if IsHeaderExtension(file_extension):
5265    # TODO(unknown): check that 1-arg constructors are explicit.
5266    #                How to tell it's a constructor?
5267    #                (handled in CheckForNonStandardConstructs for now)
5268    # TODO(unknown): check that classes declare or disable copy/assign
5269    #                (level 1 error)
5270    pass
5271
5272  # Check if people are using the verboten C basic types.  The only exception
5273  # we regularly allow is "unsigned short port" for port.
5274  if Search(r'\bshort port\b', line):
5275    if not Search(r'\bunsigned short port\b', line):
5276      error(filename, linenum, 'runtime/int', 4,
5277            'Use "unsigned short" for ports, not "short"')
5278  else:
5279    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
5280    if match:
5281      error(filename, linenum, 'runtime/int', 4,
5282            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
5283
5284  # Check if some verboten operator overloading is going on
5285  # TODO(unknown): catch out-of-line unary operator&:
5286  #   class X {};
5287  #   int operator&(const X& x) { return 42; }  // unary operator&
5288  # The trick is it's hard to tell apart from binary operator&:
5289  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
5290  if Search(r'\boperator\s*&\s*\(\s*\)', line):
5291    error(filename, linenum, 'runtime/operator', 4,
5292          'Unary operator& is dangerous.  Do not use it.')
5293
5294  # Check for suspicious usage of "if" like
5295  # } if (a == b) {
5296  if Search(r'\}\s*if\s*\(', line):
5297    error(filename, linenum, 'readability/braces', 4,
5298          'Did you mean "else if"? If not, start a new line for "if".')
5299
5300  # Check for potential format string bugs like printf(foo).
5301  # We constrain the pattern not to pick things like DocidForPrintf(foo).
5302  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
5303  # TODO(unknown): Catch the following case. Need to change the calling
5304  # convention of the whole function to process multiple line to handle it.
5305  #   printf(
5306  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
5307  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
5308  if printf_args:
5309    match = Match(r'([\w.\->()]+)$', printf_args)
5310    if match and match.group(1) != '__VA_ARGS__':
5311      function_name = re.search(r'\b((?:string)?printf)\s*\(',
5312                                line, re.I).group(1)
5313      error(filename, linenum, 'runtime/printf', 4,
5314            'Potential format string bug. Do %s("%%s", %s) instead.'
5315            % (function_name, match.group(1)))
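  # Rough illustration (our own example): a call like printf(user_input)
  # is flagged with the suggestion printf("%s", user_input).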
5316
5317  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
5318  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
5319  if match and not Match(r"^(''|-?[0-9]+|0x[0-9A-Fa-f]+)$", match.group(2)):
5320    error(filename, linenum, 'runtime/memset', 4,
5321          'Did you mean "memset(%s, 0, %s)"?'
5322          % (match.group(1), match.group(2)))
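  # Rough illustration (our own example): memset(buf, sizeof(buf), 0) is
  # flagged with the suggestion memset(buf, 0, sizeof(buf)).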
5323
5324  if Search(r'\busing namespace\b', line):
5325    if Search(r'\bliterals\b', line):
5326      error(filename, linenum, 'build/namespaces_literals', 5,
5327            'Do not use namespace using-directives.  '
5328            'Use using-declarations instead.')
5329    else:
5330      error(filename, linenum, 'build/namespaces', 5,
5331            'Do not use namespace using-directives.  '
5332            'Use using-declarations instead.')
5333
5334  # Detect variable-length arrays.
5335  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
5336  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
5337      match.group(3).find(']') == -1):
5338    # Split the size using space and arithmetic operators as delimiters.
5339    # If any of the resulting tokens are not compile-time constants then
5340    # report the error.
5341    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
5342    is_const = True
5343    skip_next = False
5344    for tok in tokens:
5345      if skip_next:
5346        skip_next = False
5347        continue
5348
5349      if Search(r'sizeof\(.+\)', tok): continue
5350      if Search(r'arraysize\(\w+\)', tok): continue
5351
5352      tok = tok.lstrip('(')
5353      tok = tok.rstrip(')')
5354      if not tok: continue
5355      if Match(r'\d+', tok): continue
5356      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
5357      if Match(r'k[A-Z0-9]\w*', tok): continue
5358      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
5359      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
5360      # A catch all for tricky sizeof cases, including 'sizeof expression',
5361      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
5362      # requires skipping the next token because we split on ' ' and '*'.
5363      if tok.startswith('sizeof'):
5364        skip_next = True
5365        continue
5366      is_const = False
5367      break
5368    if not is_const:
5369      error(filename, linenum, 'runtime/arrays', 1,
5370            'Do not use variable-length arrays.  Use an appropriately named '
5371            "('k' followed by CamelCase) compile-time constant for the size.")
5372
5373  # Check for use of unnamed namespaces in header files.  Registration
5374  # macros are typically OK, so we allow use of "namespace {" on lines
5375  # that end with backslashes.
5376  if (IsHeaderExtension(file_extension)
5377      and Search(r'\bnamespace\s*{', line)
5378      and line[-1] != '\\'):
5379    error(filename, linenum, 'build/namespaces_headers', 4,
5380          'Do not use unnamed namespaces in header files.  See '
5381          'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
5382          ' for more information.')
5383
5384
5385def CheckGlobalStatic(filename, clean_lines, linenum, error):
5386  """Check for unsafe global or static objects.
5387
5388  Args:
5389    filename: The name of the current file.
5390    clean_lines: A CleansedLines instance containing the file.
5391    linenum: The number of the line to check.
5392    error: The function to call with any errors found.
5393  """
5394  line = clean_lines.elided[linenum]
5395
5396  # Match two lines at a time to support multiline declarations
5397  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
5398    line += clean_lines.elided[linenum + 1].strip()
5399
5400  # Check for people declaring static/global STL strings at the top level.
5401  # This is dangerous because the C++ language does not guarantee that
5402  # globals with constructors are initialized before the first access, and
5403  # also because globals can be destroyed when some threads are still running.
5404  # TODO(unknown): Generalize this to also find static unique_ptr instances.
5405  # TODO(unknown): File bugs for clang-tidy to find these.
5406  match = Match(
5407      r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
5408      r'([a-zA-Z0-9_:]+)\b(.*)',
5409      line)
5410
5411  # Remove false positives:
5412  # - String pointers (as opposed to values).
5413  #    string *pointer
5414  #    const string *pointer
5415  #    string const *pointer
5416  #    string *const pointer
5417  #
5418  # - Functions and template specializations.
5419  #    string Function<Type>(...
5420  #    string Class<Type>::Method(...
5421  #
5422  # - Operators.  These are matched separately because operator names
5423  #   cross non-word boundaries, and trying to match both operators
5424  #   and functions at the same time would decrease accuracy of
5425  #   matching identifiers.
5426  #    string Class::operator*()
5427  if (match and
5428      not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
5429      not Search(r'\boperator\W', line) and
5430      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
5431    if Search(r'\bconst\b', line):
5432      error(filename, linenum, 'runtime/string', 4,
5433            'For a static/global string constant, use a C style string '
5434            'instead: "%schar%s %s[]".' %
5435            (match.group(1), match.group(2) or '', match.group(3)))
5436    else:
5437      error(filename, linenum, 'runtime/string', 4,
5438            'Static/global string variables are not permitted.')
5439
5440  if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
5441      Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
5442    error(filename, linenum, 'runtime/init', 4,
5443          'You seem to be initializing a member variable with itself.')
5444
5445
5446def CheckPrintf(filename, clean_lines, linenum, error):
5447  """Check for printf related issues.
5448
5449  Args:
5450    filename: The name of the current file.
5451    clean_lines: A CleansedLines instance containing the file.
5452    linenum: The number of the line to check.
5453    error: The function to call with any errors found.
5454  """
5455  line = clean_lines.elided[linenum]
5456
5457  # When snprintf is used, the second argument shouldn't be a literal.
5458  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
5459  if match and match.group(2) != '0':
5460    # If 2nd arg is zero, snprintf is used to calculate size.
5461    error(filename, linenum, 'runtime/printf', 3,
5462          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
5463          'to snprintf.' % (match.group(1), match.group(2)))
5464
5465  # Check if some verboten C functions are being used.
5466  if Search(r'\bsprintf\s*\(', line):
5467    error(filename, linenum, 'runtime/printf', 5,
5468          'Never use sprintf. Use snprintf instead.')
5469  match = Search(r'\b(strcpy|strcat)\s*\(', line)
5470  if match:
5471    error(filename, linenum, 'runtime/printf', 4,
5472          'Almost always, snprintf is better than %s' % match.group(1))
5473
5474
5475def IsDerivedFunction(clean_lines, linenum):
5476  """Check if current line contains an inherited function.
5477
5478  Args:
5479    clean_lines: A CleansedLines instance containing the file.
5480    linenum: The number of the line to check.
5481  Returns:
5482    True if current line contains a function with "override"
5483    virt-specifier.
5484  """
5485  # Scan back a few lines for start of current function
5486  for i in xrange(linenum, max(-1, linenum - 10), -1):
5487    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
5488    if match:
5489      # Look for "override" after the matching closing parenthesis
5490      line, _, closing_paren = CloseExpression(
5491          clean_lines, i, len(match.group(1)))
5492      return (closing_paren >= 0 and
5493              Search(r'\boverride\b', line[closing_paren:]))
5494  return False
5495
5496
5497def IsOutOfLineMethodDefinition(clean_lines, linenum):
5498  """Check if current line contains an out-of-line method definition.
5499
5500  Args:
5501    clean_lines: A CleansedLines instance containing the file.
5502    linenum: The number of the line to check.
5503  Returns:
5504    True if current line contains an out-of-line method definition.
5505  """
5506  # Scan back a few lines for start of current function
5507  for i in xrange(linenum, max(-1, linenum - 10), -1):
5508    if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
5509      return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
5510  return False
5511
5512
5513def IsInitializerList(clean_lines, linenum):
5514  """Check if current line is inside constructor initializer list.
5515
5516  Args:
5517    clean_lines: A CleansedLines instance containing the file.
5518    linenum: The number of the line to check.
5519  Returns:
5520    True if current line appears to be inside constructor initializer
5521    list, False otherwise.
5522  """
5523  for i in xrange(linenum, 1, -1):
5524    line = clean_lines.elided[i]
5525    if i == linenum:
5526      remove_function_body = Match(r'^(.*)\{\s*$', line)
5527      if remove_function_body:
5528        line = remove_function_body.group(1)
5529
5530    if Search(r'\s:\s*\w+[({]', line):
5531      # A lone colon tends to indicate the start of a constructor
5532      # initializer list.  It could also be a ternary operator, which
5533      # also tends to appear in constructor initializer lists as
5534      # opposed to parameter lists.
5535      return True
5536    if Search(r'\}\s*,\s*$', line):
5537      # A closing brace followed by a comma is probably the end of a
5538      # brace-initialized member in constructor initializer list.
5539      return True
5540    if Search(r'[{};]\s*$', line):
5541      # Found one of the following:
5542      # - A closing brace or semicolon, probably the end of the previous
5543      #   function.
5544      # - An opening brace, probably the start of current class or namespace.
5545      #
5546      # Current line is probably not inside an initializer list since
5547      # we saw one of those things without seeing the starting colon.
5548      return False
5549
5550  # Got to the beginning of the file without seeing the start of
5551  # constructor initializer list.
5552  return False
5553
5554
5555def CheckForNonConstReference(filename, clean_lines, linenum,
5556                              nesting_state, error):
5557  """Check for non-const references.
5558
5559  Separate from CheckLanguage since it scans backwards from current
5560  line, instead of scanning forward.
5561
5562  Args:
5563    filename: The name of the current file.
5564    clean_lines: A CleansedLines instance containing the file.
5565    linenum: The number of the line to check.
5566    nesting_state: A NestingState instance which maintains information about
5567                   the current stack of nested blocks being parsed.
5568    error: The function to call with any errors found.
5569  """
5570  # Do nothing if there is no '&' on current line.
5571  line = clean_lines.elided[linenum]
5572  if '&' not in line:
5573    return
5574
5575  # If a function is inherited, the current function doesn't have much
5576  # of a choice, so any non-const references should not be blamed on
5577  # the derived function.
5578  if IsDerivedFunction(clean_lines, linenum):
5579    return
5580
5581  # Don't warn on out-of-line method definitions, as we would warn on the
5582  # in-line declaration, if it isn't marked with 'override'.
5583  if IsOutOfLineMethodDefinition(clean_lines, linenum):
5584    return
5585
5586  # Long type names may be broken across multiple lines, usually in one
5587  # of these forms:
5588  #   LongType
5589  #       ::LongTypeContinued &identifier
5590  #   LongType::
5591  #       LongTypeContinued &identifier
5592  #   LongType<
5593  #       ...>::LongTypeContinued &identifier
5594  #
5595  # If we detected a type split across two lines, join the previous
5596  # line to current line so that we can match const references
5597  # accordingly.
5598  #
5599  # Note that this only scans back one line, since scanning back
5600  # an arbitrary number of lines would be expensive.  If you have a type
5601  # that spans more than 2 lines, please use a typedef.
5602  if linenum > 1:
5603    previous = None
5604    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
5605      # previous_line\n + ::current_line
5606      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
5607                        clean_lines.elided[linenum - 1])
5608    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
5609      # previous_line::\n + current_line
5610      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
5611                        clean_lines.elided[linenum - 1])
5612    if previous:
5613      line = previous.group(1) + line.lstrip()
5614    else:
5615      # Check for templated parameter that is split across multiple lines
5616      endpos = line.rfind('>')
5617      if endpos > -1:
5618        (_, startline, startpos) = ReverseCloseExpression(
5619            clean_lines, linenum, endpos)
5620        if startpos > -1 and startline < linenum:
5621          # Found the matching < on an earlier line, collect all
5622          # pieces up to current line.
5623          line = ''
5624          for i in xrange(startline, linenum + 1):
5625            line += clean_lines.elided[i].strip()
5626
5627  # Check for non-const references in function parameters.  A single '&' may
5628  # be found in the following places:
5629  #   inside expression: binary & for bitwise AND
5630  #   inside expression: unary & for taking the address of something
5631  #   inside declarators: reference parameter
5632  # We will exclude the first two cases by checking that we are not inside a
5633  # function body, including one that was just introduced by a trailing '{'.
5634  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
5635  if (nesting_state.previous_stack_top and
5636      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
5637           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
5638    # Not at toplevel, not within a class, and not within a namespace
5639    return
5640
5641  # Avoid initializer lists.  We only need to scan back from the
5642  # current line for something that starts with ':'.
5643  #
5644  # We don't need to check the current line, since the '&' would
5645  # appear inside the second set of parentheses on the current line as
5646  # opposed to the first set.
5647  if linenum > 0:
5648    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
5649      previous_line = clean_lines.elided[i]
5650      if not Search(r'[),]\s*$', previous_line):
5651        break
5652      if Match(r'^\s*:\s+\S', previous_line):
5653        return
5654
5655  # Avoid preprocessors
5656  if Search(r'\\\s*$', line):
5657    return
5658
5659  # Avoid constructor initializer lists
5660  if IsInitializerList(clean_lines, linenum):
5661    return
5662
5663  # We allow non-const references in a few standard places, like functions
5664  # called "swap()" or iostream operators like "<<" or ">>".  Do not check
5665  # those function parameters.
5666  #
5667  # We also accept & in static_assert, which looks like a function but
5668  # it's actually a declaration expression.
5669  allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
5670                           r'operator\s*[<>][<>]|'
5671                           r'static_assert|COMPILE_ASSERT'
5672                           r')\s*\(')
5673  if Search(allowed_functions, line):
5674    return
5675  elif not Search(r'\S+\([^)]*$', line):
5676    # We don't see an allowed function on this line.  In fact, we
5677    # didn't see any function name on this line, so this is likely a
5678    # multi-line parameter list.  Try a bit harder to catch this case.
5679    for i in xrange(2):
5680      if (linenum > i and
5681          Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
5682        return
5683
5684  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
5685  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
5686    if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
5687        not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
5688      error(filename, linenum, 'runtime/references', 2,
5689            'Is this a non-const reference? '
5690            'If so, make const or use a pointer: ' +
5691            ReplaceAll(' *<', '<', parameter))
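  # Editorial examples (not upstream; hypothetical C++ signatures) of how this
  # check typically resolves:
  #   void Rotate(Matrix& m);             // flagged: runtime/references
  #   void Rotate(const Matrix& m);       // OK: const reference
  #   void swap(Buffer& a, Buffer& b);    // OK: matches allowed_functions
  #   ostream& operator<<(ostream& os, const Foo& f);  // OK: stream operator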
5692
5693
5694def CheckCasts(filename, clean_lines, linenum, error):
5695  """Various cast related checks.
5696
5697  Args:
5698    filename: The name of the current file.
5699    clean_lines: A CleansedLines instance containing the file.
5700    linenum: The number of the line to check.
5701    error: The function to call with any errors found.
5702  """
5703  line = clean_lines.elided[linenum]
5704
5705  # Check to see if they're using a conversion function cast.
5706  # I just try to capture the most common basic types, though there are more.
5707  # Parameterless conversion functions, such as bool(), are allowed as they are
5708  # probably a member operator declaration or default constructor.
5709  match = Search(
5710      r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
5711      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
5712      r'(\([^)].*)', line)
5713  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
5714  if match and not expecting_function:
5715    matched_type = match.group(2)
5716
5717    # matched_new_or_template is used to silence two false positives:
5718    # - New operators
5719    # - Template arguments with function types
5720    #
5721    # For template arguments, we match on types immediately following
5722    # an opening bracket without any spaces.  This is a fast way to
5723    # silence the common case where the function type is the first
5724    # template argument.  False negative with less-than comparison is
5725    # avoided because those operators are usually followed by a space.
5726    #
5727    #   function<double(double)>   // bracket + no space = false positive
5728    #   value < double(42)         // bracket + space = true positive
5729    matched_new_or_template = match.group(1)
5730
5731    # Avoid arrays by looking for brackets that come after the closing
5732    # parenthesis.
5733    if Match(r'\([^()]+\)\s*\[', match.group(3)):
5734      return
5735
5736    # Other things to ignore:
5737    # - Function pointers
5738    # - Casts to pointer types
5739    # - Placement new
5740    # - Alias declarations
5741    matched_funcptr = match.group(3)
5742    if (matched_new_or_template is None and
5743        not (matched_funcptr and
5744             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
5745                    matched_funcptr) or
5746              matched_funcptr.startswith('(*)'))) and
5747        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
5748        not Search(r'new\(\S+\)\s*' + matched_type, line)):
5749      error(filename, linenum, 'readability/casting', 4,
5750            'Using deprecated casting style.  '
5751            'Use static_cast<%s>(...) instead' %
5752            matched_type)
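    # Editorial illustration (not upstream): a hypothetical line like
    #   int x = int(3.5);
    # reaches the error above and is told to use static_cast<int>(...) instead,
    # while "function<double(double)> f;" is silenced via matched_new_or_template.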
5753
5754  if not expecting_function:
5755    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
5756                    r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)', error)
5757
5758  # This doesn't catch all cases. Consider (const char * const)"hello".
5759  #
5760  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
5761  # compile).
5762  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
5763                     r'\((char\s?\*+\s?)\)\s*"', error):
5764    pass
5765  else:
5766    # Check pointer casts for other than string constants
5767    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
5768                    r'\((\w+\s?\*+\s?)\)', error)
5769
5770  # In addition, we look for people taking the address of a cast.  This
5771  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
5772  # point where you think.
5773  #
5774  # Some non-identifier character is required before the '&' for the
5775  # expression to be recognized as a cast.  These are casts:
5776  #   expression = &static_cast<int*>(temporary());
5777  #   function(&(int*)(temporary()));
5778  #
5779  # This is not a cast:
5780  #   reference_type&(int* function_param);
5781  match = Search(
5782      r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
5783      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
5784  if match:
5785    # Try a better error message when the & is bound to something
5786    # dereferenced by the casted pointer, as opposed to the casted
5787    # pointer itself.
5788    parenthesis_error = False
5789    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
5790    if match:
5791      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
5792      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
5793        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
5794        if x2 >= 0:
5795          extended_line = clean_lines.elided[y2][x2:]
5796          if y2 < clean_lines.NumLines() - 1:
5797            extended_line += clean_lines.elided[y2 + 1]
5798          if Match(r'\s*(?:->|\[)', extended_line):
5799            parenthesis_error = True
5800
5801    if parenthesis_error:
5802      error(filename, linenum, 'readability/casting', 4,
5803            ('Are you taking an address of something dereferenced '
5804             'from a cast?  Wrapping the dereferenced expression in '
5805             'parentheses will make the binding more obvious'))
5806    else:
5807      error(filename, linenum, 'runtime/casting', 4,
5808            ('Are you taking an address of a cast?  '
5809             'This is dangerous: could be a temp var.  '
5810             'Take the address before doing the cast, rather than after'))
5811
5812
5813def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
5814  """Checks for a C-style cast by looking for the pattern.
5815
5816  Args:
5817    filename: The name of the current file.
5818    clean_lines: A CleansedLines instance containing the file.
5819    linenum: The number of the line to check.
5820    cast_type: The string for the C++ cast to recommend.  This is either
5821      reinterpret_cast, static_cast, or const_cast, depending.
5822    pattern: The regular expression used to find C-style casts.
5823    error: The function to call with any errors found.
5824
5825  Returns:
5826    True if an error was emitted.
5827    False otherwise.
5828  """
5829  line = clean_lines.elided[linenum]
5830  match = Search(pattern, line)
5831  if not match:
5832    return False
5833
5834  # Exclude lines with keywords that tend to look like casts
5835  context = line[0:match.start(1) - 1]
5836  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
5837    return False
5838
5839  # Try expanding the current context to see if we are one level of
5840  # parentheses inside a macro.
5841  if linenum > 0:
5842    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
5843      context = clean_lines.elided[i] + context
5844  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
5845    return False
5846
5847  # operator++(int) and operator--(int)
5848  if context.endswith(' operator++') or context.endswith(' operator--'):
5849    return False
5850
5851  # A single unnamed argument for a function tends to look like an old-style cast.
5852  # If we see those, don't issue warnings for deprecated casts.
5853  remainder = line[match.end(0):]
5854  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
5855           remainder):
5856    return False
5857
5858  # At this point, all that should be left is actual casts.
5859  error(filename, linenum, 'readability/casting', 4,
5860        'Using C-style cast.  Use %s<%s>(...) instead' %
5861        (cast_type, match.group(1)))
5862
5863  return True
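  # Editorial, doctest-style sketch of the pattern this helper receives from
  # the static_cast call site in CheckCasts (Search is the module helper):
  #   >>> Search(r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)',
  #   ...        'int x = (int)y;').group(1)
  #   'int'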
5864
5865
5866def ExpectingFunctionArgs(clean_lines, linenum):
5867  """Checks whether the line is where function type arguments are expected.
5868
5869  Args:
5870    clean_lines: A CleansedLines instance containing the file.
5871    linenum: The number of the line to check.
5872
5873  Returns:
5874    True if the line at 'linenum' is inside something that expects arguments
5875    of function types.
5876  """
5877  line = clean_lines.elided[linenum]
5878  return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
5879          (linenum >= 2 and
5880           (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
5881                  clean_lines.elided[linenum - 1]) or
5882            Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
5883                  clean_lines.elided[linenum - 2]) or
5884            Search(r'\bstd::m?function\s*\<\s*$',
5885                   clean_lines.elided[linenum - 1]))))
5886
5887
5888_HEADERS_CONTAINING_TEMPLATES = (
5889    ('<deque>', ('deque',)),
5890    ('<functional>', ('unary_function', 'binary_function',
5891                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
5892                      'negate',
5893                      'equal_to', 'not_equal_to', 'greater', 'less',
5894                      'greater_equal', 'less_equal',
5895                      'logical_and', 'logical_or', 'logical_not',
5896                      'unary_negate', 'not1', 'binary_negate', 'not2',
5897                      'bind1st', 'bind2nd',
5898                      'pointer_to_unary_function',
5899                      'pointer_to_binary_function',
5900                      'ptr_fun',
5901                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
5902                      'mem_fun_ref_t',
5903                      'const_mem_fun_t', 'const_mem_fun1_t',
5904                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
5905                      'mem_fun_ref',
5906                     )),
5907    ('<limits>', ('numeric_limits',)),
5908    ('<list>', ('list',)),
5909    ('<map>', ('multimap',)),
5910    ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
5911                  'unique_ptr', 'weak_ptr')),
5912    ('<queue>', ('queue', 'priority_queue',)),
5913    ('<set>', ('multiset',)),
5914    ('<stack>', ('stack',)),
5915    ('<string>', ('char_traits', 'basic_string',)),
5916    ('<tuple>', ('tuple',)),
5917    ('<unordered_map>', ('unordered_map', 'unordered_multimap')),
5918    ('<unordered_set>', ('unordered_set', 'unordered_multiset')),
5919    ('<utility>', ('pair',)),
5920    ('<vector>', ('vector',)),
5921
5922    # gcc extensions.
5923    # Note: std::hash is their hash, ::hash is our hash
5924    ('<hash_map>', ('hash_map', 'hash_multimap',)),
5925    ('<hash_set>', ('hash_set', 'hash_multiset',)),
5926    ('<slist>', ('slist',)),
5927    )
5928
5929_HEADERS_MAYBE_TEMPLATES = (
5930    ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
5931                     'transform',
5932                    )),
5933    ('<utility>', ('forward', 'make_pair', 'move', 'swap')),
5934    )
5935
5936_RE_PATTERN_STRING = re.compile(r'\bstring\b')
5937
5938_re_pattern_headers_maybe_templates = []
5939for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
5940  for _template in _templates:
5941    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
5942    # 'type::max()'.
5943    _re_pattern_headers_maybe_templates.append(
5944        (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
5945            _template,
5946            _header))
5947# Match set<type>, but not foo->set<type>, foo.set<type>
5948_re_pattern_headers_maybe_templates.append(
5949    (re.compile(r'[^>.]\bset\s*\<'),
5950        'set<>',
5951        '<set>'))
5952# Match 'map<type> var' and 'std::map<type>(...)', but not 'map<type>(...)'
5953_re_pattern_headers_maybe_templates.append(
5954    (re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'),
5955        'map<>',
5956        '<map>'))
5957
5958# Other scripts may reach in and modify this pattern.
5959_re_pattern_templates = []
5960for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
5961  for _template in _templates:
5962    _re_pattern_templates.append(
5963        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
5964         _template + '<>',
5965         _header))
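# Editorial, doctest-style illustration (not upstream) of the generated
# patterns, e.g. for the '<vector>' entry above:
#   >>> bool(re.compile(r'(\<|\b)' + 'vector' + r'\s*\<').search('std::vector<int> v;'))
#   True
#   >>> bool(re.compile(r'(\<|\b)' + 'vector' + r'\s*\<').search('my_vector<int> v;'))
#   False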
5966
5967
5968def FilesBelongToSameModule(filename_cc, filename_h):
5969  """Check if these two filenames belong to the same module.
5970
5971  The concept of a 'module' here is as follows:
5972  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
5973  same 'module' if they are in the same directory.
5974  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
5975  to belong to the same module here.
5976
5977  If the filename_cc contains a longer path than the filename_h, for example,
5978  '/absolute/path/to/base/sysinfo.cc', and this file would include
5979  'base/sysinfo.h', this function also produces the prefix needed to open the
5980  header. This is used by the caller of this function to more robustly open the
5981  header file. We don't have access to the real include paths in this context,
5982  so we need this guesswork here.
5983
5984  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
5985  according to this implementation. Because of this, this function gives
5986  some false positives. This should be sufficiently rare in practice.
5987
5988  Args:
5989    filename_cc: The path of the source (e.g. .cc) file.
5990    filename_h: The path of the header file.
5991
5992  Returns:
5993    Tuple with a bool and a string:
5994    bool: True if filename_cc and filename_h belong to the same module.
5995    string: the additional prefix needed to open the header file.
5996  """
5997  fileinfo_cc = FileInfo(filename_cc)
5998  if fileinfo_cc.Extension().lstrip('.') not in GetNonHeaderExtensions():
5999    return (False, '')
6000
6001  fileinfo_h = FileInfo(filename_h)
6002  if not IsHeaderExtension(fileinfo_h.Extension().lstrip('.')):
6003    return (False, '')
6004
6005  filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
6006  matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
6007  if matched_test_suffix:
6008    filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
6009
6010  filename_cc = filename_cc.replace('/public/', '/')
6011  filename_cc = filename_cc.replace('/internal/', '/')
6012
6013  filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
6014  if filename_h.endswith('-inl'):
6015    filename_h = filename_h[:-len('-inl')]
6016  filename_h = filename_h.replace('/public/', '/')
6017  filename_h = filename_h.replace('/internal/', '/')
6018
6019  files_belong_to_same_module = filename_cc.endswith(filename_h)
6020  common_path = ''
6021  if files_belong_to_same_module:
6022    common_path = filename_cc[:-len(filename_h)]
6023  return files_belong_to_same_module, common_path
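  # Editorial, doctest-style sketch (assumes the default extension settings):
  #   >>> FilesBelongToSameModule('a/b/foo.cc', 'a/b/foo.h')
  #   (True, '')
  #   >>> FilesBelongToSameModule('/absolute/path/to/base/sysinfo.cc', 'base/sysinfo.h')
  #   (True, '/absolute/path/to/')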
6024
6025
6026def UpdateIncludeState(filename, include_dict, io=codecs):
6027  """Fill up the include_dict with new includes found from the file.
6028
6029  Args:
6030    filename: the name of the header to read.
6031    include_dict: a dictionary in which the headers are inserted.
6032    io: The io factory to use to read the file. Provided for testability.
6033
6034  Returns:
6035    True if a header was successfully added. False otherwise.
6036  """
6037  headerfile = None
6038  try:
6039    with io.open(filename, 'r', 'utf8', 'replace') as headerfile:
6040      linenum = 0
6041      for line in headerfile:
6042        linenum += 1
6043        clean_line = CleanseComments(line)
6044        match = _RE_PATTERN_INCLUDE.search(clean_line)
6045        if match:
6046          include = match.group(2)
6047          include_dict.setdefault(include, linenum)
6048    return True
6049  except IOError:
6050    return False
6051
6052
6053
6054def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
6055                              io=codecs):
6056  """Reports missing STL includes.
6057
6058  This function will output warnings to make sure you are including the headers
6059  necessary for the STL containers and functions that you use. We only give one
6060  reason to include a header. For example, if you use both equal_to<> and
6061  less<> in a .h file, only one of them (whichever appears later in the file)
6062  will be reported as a reason to include <functional>.
6063
6064  Args:
6065    filename: The name of the current file.
6066    clean_lines: A CleansedLines instance containing the file.
6067    include_state: An _IncludeState instance.
6068    error: The function to call with any errors found.
6069    io: The IO factory to use to read the header file. Provided for unittest
6070        injection.
6071  """
6072  required = {}  # A map of header name to line number and the template entity.
6073                 # Example of required: { '<functional>': (1219, 'less<>') }
6074
6075  for linenum in xrange(clean_lines.NumLines()):
6076    line = clean_lines.elided[linenum]
6077    if not line or line[0] == '#':
6078      continue
6079
6080    # String is special -- it is a non-templatized type in STL.
6081    matched = _RE_PATTERN_STRING.search(line)
6082    if matched:
6083      # Don't warn about strings in non-STL namespaces:
6084      # (We check only the first match per line; good enough.)
6085      prefix = line[:matched.start()]
6086      if prefix.endswith('std::') or not prefix.endswith('::'):
6087        required['<string>'] = (linenum, 'string')
6088
6089    for pattern, template, header in _re_pattern_headers_maybe_templates:
6090      if pattern.search(line):
6091        required[header] = (linenum, template)
6092
6093    # The following check is just a speed-up; no semantics are changed.
6094    if '<' not in line:  # Reduces CPU time usage by skipping lines.
6095      continue
6096
6097    for pattern, template, header in _re_pattern_templates:
6098      matched = pattern.search(line)
6099      if matched:
6100        # Don't warn about IWYU in non-STL namespaces:
6101        # (We check only the first match per line; good enough.)
6102        prefix = line[:matched.start()]
6103        if prefix.endswith('std::') or not prefix.endswith('::'):
6104          required[header] = (linenum, template)
6105
6106  # The policy is that if you #include something in foo.h you don't need to
6107  # include it again in foo.cc. Here, we will look at possible includes.
6108  # Let's flatten the include_state include_list and copy it into a dictionary.
6109  include_dict = dict([item for sublist in include_state.include_list
6110                       for item in sublist])
6111
6112  # Did we find the header for this file (if any) and successfully load it?
6113  header_found = False
6114
6115  # Use the absolute path so that matching works properly.
6116  abs_filename = FileInfo(filename).FullName()
6117
6118  # For Emacs's flymake.
6119  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
6120  # by flymake and that file name might end with '_flymake.cc'. In that case,
6121  # restore original file name here so that the corresponding header file can be
6122  # found.
6123  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
6124  # instead of 'foo_flymake.h'
6125  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
6126
6127  # include_dict is modified during iteration, so we iterate over a copy of
6128  # the keys.
6129  header_keys = list(include_dict.keys())
6130  for header in header_keys:
6131    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
6132    fullpath = common_path + header
6133    if same_module and UpdateIncludeState(fullpath, include_dict, io):
6134      header_found = True
6135
6136  # If we can't find the header file for a .cc, assume it's because we don't
6137  # know where to look. In that case we'll give up as we're not sure they
6138  # didn't include it in the .h file.
6139  # TODO(unknown): Do a better job of finding .h files so we are confident that
6140  # not having the .h file means there isn't one.
6141  if not header_found:
6142    for extension in GetNonHeaderExtensions():
6143      if filename.endswith('.' + extension):
6144        return
6145
6146  # All the lines have been processed, report the errors found.
6147  for required_header_unstripped in sorted(required, key=required.__getitem__):
6148    template = required[required_header_unstripped][1]
6149    if required_header_unstripped.strip('<>"') not in include_dict:
6150      error(filename, required[required_header_unstripped][0],
6151            'build/include_what_you_use', 4,
6152            'Add #include ' + required_header_unstripped + ' for ' + template)
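  # Editorial example (not upstream) of the resulting diagnostic: a .cc file
  # that uses std::string without a <string> include gets, on the first line
  # that uses it, a message along the lines of
  #   Add #include <string> for string  [build/include_what_you_use] [4]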
6153
6154
6155_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
6156
6157
6158def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
6159  """Check that make_pair's template arguments are deduced.
6160
6161  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
6162  specified explicitly, and such use isn't intended in any case.
6163
6164  Args:
6165    filename: The name of the current file.
6166    clean_lines: A CleansedLines instance containing the file.
6167    linenum: The number of the line to check.
6168    error: The function to call with any errors found.
6169  """
6170  line = clean_lines.elided[linenum]
6171  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
6172  if match:
6173    error(filename, linenum, 'build/explicit_make_pair',
6174          4,  # 4 = high confidence
6175          'For C++11-compatibility, omit template arguments from make_pair'
6176          ' OR use pair directly OR if appropriate, construct a pair directly')
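  # Editorial, doctest-style illustration of the pattern above:
  #   >>> bool(_RE_PATTERN_EXPLICIT_MAKEPAIR.search('return std::make_pair<int, int>(1, 2);'))
  #   True
  #   >>> bool(_RE_PATTERN_EXPLICIT_MAKEPAIR.search('return std::make_pair(1, 2);'))
  #   False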
6177
6178
6179def CheckRedundantVirtual(filename, clean_lines, linenum, error):
6180  """Check if line contains a redundant "virtual" function-specifier.
6181
6182  Args:
6183    filename: The name of the current file.
6184    clean_lines: A CleansedLines instance containing the file.
6185    linenum: The number of the line to check.
6186    error: The function to call with any errors found.
6187  """
6188  # Look for "virtual" on current line.
6189  line = clean_lines.elided[linenum]
6190  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
6191  if not virtual: return
6192
6193  # Ignore "virtual" keywords that are near access-specifiers.  These
6194  # are only used in class base-specifier and do not apply to member
6195  # functions.
6196  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
6197      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
6198    return
6199
6200  # Ignore the "virtual" keyword from virtual base classes.  Usually
6201  # there is a colon on the same line in these cases (virtual base
6202  # classes are rare in google3 because multiple inheritance is rare).
6203  if Match(r'^.*[^:]:[^:].*$', line): return
6204
6205  # Look for the next opening parenthesis.  This is the start of the
6206  # parameter list (possibly on the next line shortly after virtual).
6207  # TODO(unknown): doesn't work if there are virtual functions with
6208  # decltype() or other things that use parentheses, but csearch suggests
6209  # that this is rare.
6210  end_col = -1
6211  end_line = -1
6212  start_col = len(virtual.group(2))
6213  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
6214    line = clean_lines.elided[start_line][start_col:]
6215    parameter_list = Match(r'^([^(]*)\(', line)
6216    if parameter_list:
6217      # Match parentheses to find the end of the parameter list
6218      (_, end_line, end_col) = CloseExpression(
6219          clean_lines, start_line, start_col + len(parameter_list.group(1)))
6220      break
6221    start_col = 0
6222
6223  if end_col < 0:
6224    return  # Couldn't find end of parameter list, give up
6225
6226  # Look for "override" or "final" after the parameter list
6227  # (possibly on the next few lines).
6228  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
6229    line = clean_lines.elided[i][end_col:]
6230    match = Search(r'\b(override|final)\b', line)
6231    if match:
6232      error(filename, linenum, 'readability/inheritance', 4,
6233            ('"virtual" is redundant since function is '
6234             'already declared as "%s"' % match.group(1)))
6235
6236    # Set end_col to check whole lines after we are done with the
6237    # first line.
6238    end_col = 0
6239    if Search(r'[^\w]\s*$', line):
6240      break
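  # Editorial example (not upstream; hypothetical C++ declaration) of what this
  # check reports:
  #   virtual void OnClose() override;  // '"virtual" is redundant since function
  #                                     //  is already declared as "override"'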
6241
6242
6243def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
6244  """Check if line contains a redundant "override" or "final" virt-specifier.
6245
6246  Args:
6247    filename: The name of the current file.
6248    clean_lines: A CleansedLines instance containing the file.
6249    linenum: The number of the line to check.
6250    error: The function to call with any errors found.
6251  """
6252  # Look for closing parenthesis nearby.  We need one to confirm where
6253  # the declarator ends and where the virt-specifier starts to avoid
6254  # false positives.
6255  line = clean_lines.elided[linenum]
6256  declarator_end = line.rfind(')')
6257  if declarator_end >= 0:
6258    fragment = line[declarator_end:]
6259  else:
6260    if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
6261      fragment = line
6262    else:
6263      return
6264
6265  # Check that at most one of "override" or "final" is present, not both
6266  if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
6267    error(filename, linenum, 'readability/inheritance', 4,
6268          ('"override" is redundant since function is '
6269           'already declared as "final"'))
6270
6271
6272
6273
6274# Returns true if we are at a new block, and it is directly
6275# inside of a namespace.
6276def IsBlockInNameSpace(nesting_state, is_forward_declaration):
6277  """Checks that the new block is directly in a namespace.
6278
6279  Args:
6280    nesting_state: The _NestingState object that contains info about our state.
6281    is_forward_declaration: If the class is a forward declared class.
6282  Returns:
6283    Whether or not the new block is directly in a namespace.
6284  """
6285  if is_forward_declaration:
6286    return len(nesting_state.stack) >= 1 and (
6287      isinstance(nesting_state.stack[-1], _NamespaceInfo))
6288
6289
6290  return (len(nesting_state.stack) > 1 and
6291          nesting_state.stack[-1].check_namespace_indentation and
6292          isinstance(nesting_state.stack[-2], _NamespaceInfo))
6293
6294
6295def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
6296                                    raw_lines_no_comments, linenum):
6297  """This method determines if we should apply our namespace indentation check.
6298
6299  Args:
6300    nesting_state: The current nesting state.
6301    is_namespace_indent_item: If we just put a new class on the stack, True.
6302      If the top of the stack is not a class, or we did not recently
6303      add the class, False.
6304    raw_lines_no_comments: The lines without the comments.
6305    linenum: The current line number we are processing.
6306
6307  Returns:
6308    True if we should apply our namespace indentation check. Currently, it
6309    only works for classes and namespaces inside of a namespace.
6310  """
6311
6312  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
6313                                                     linenum)
6314
6315  if not (is_namespace_indent_item or is_forward_declaration):
6316    return False
6317
6318  # If we are in a macro, we do not want to check the namespace indentation.
6319  if IsMacroDefinition(raw_lines_no_comments, linenum):
6320    return False
6321
6322  return IsBlockInNameSpace(nesting_state, is_forward_declaration)
6323
6324
6325# Call this method if the line is directly inside of a namespace.
6326# If the line above is blank (excluding comments) or the start of
6327# an inner namespace, it cannot be indented.
6328def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
6329                                    error):
6330  line = raw_lines_no_comments[linenum]
6331  if Match(r'^\s+', line):
6332    error(filename, linenum, 'runtime/indentation_namespace', 4,
6333          'Do not indent within a namespace')
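  # Editorial example (not upstream; hypothetical C++ code) of the layout this
  # enforces:
  #   namespace myns {
  #   class Foo;      // OK: not indented
  #     class Bar;    // flagged: runtime/indentation_namespace
  #   }  // namespace myns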
6334
6335
6336def ProcessLine(filename, file_extension, clean_lines, line,
6337                include_state, function_state, nesting_state, error,
6338                extra_check_functions=None):
6339  """Processes a single line in the file.
6340
6341  Args:
6342    filename: Filename of the file that is being processed.
6343    file_extension: The extension (dot not included) of the file.
6344    clean_lines: An array of strings, each representing a line of the file,
6345                 with comments stripped.
6346    line: Number of line being processed.
6347    include_state: An _IncludeState instance in which the headers are inserted.
6348    function_state: A _FunctionState instance which counts function lines, etc.
6349    nesting_state: A NestingState instance which maintains information about
6350                   the current stack of nested blocks being parsed.
6351    error: A callable to which errors are reported, which takes 4 arguments:
6352           filename, line number, error level, and message
6353    extra_check_functions: An array of additional check functions that will be
6354                           run on each source line. Each function takes 4
6355                           arguments: filename, clean_lines, line, error
6356  """
6357  raw_lines = clean_lines.raw_lines
6358  ParseNolintSuppressions(filename, raw_lines[line], line, error)
6359  nesting_state.Update(filename, clean_lines, line, error)
6360  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
6361                               error)
6362  if nesting_state.InAsmBlock(): return
6363  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
6364  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
6365  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
6366  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
6367                nesting_state, error)
6368  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
6369  CheckForNonStandardConstructs(filename, clean_lines, line,
6370                                nesting_state, error)
6371  CheckVlogArguments(filename, clean_lines, line, error)
6372  CheckPosixThreading(filename, clean_lines, line, error)
6373  CheckInvalidIncrement(filename, clean_lines, line, error)
6374  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
6375  CheckRedundantVirtual(filename, clean_lines, line, error)
6376  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
6377  if extra_check_functions:
6378    for check_fn in extra_check_functions:
6379      check_fn(filename, clean_lines, line, error)
6380
6381def FlagCxx11Features(filename, clean_lines, linenum, error):
6382  """Flag those c++11 features that we only allow in certain places.
6383
6384  Args:
6385    filename: The name of the current file.
6386    clean_lines: A CleansedLines instance containing the file.
6387    linenum: The number of the line to check.
6388    error: The function to call with any errors found.
6389  """
6390  line = clean_lines.elided[linenum]
6391
6392  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
6393
6394  # Flag unapproved C++ TR1 headers.
6395  if include and include.group(1).startswith('tr1/'):
6396    error(filename, linenum, 'build/c++tr1', 5,
6397          ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
6398
6399  # Flag unapproved C++11 headers.
6400  if include and include.group(1) in ('cfenv',
6401                                      'condition_variable',
6402                                      'fenv.h',
6403                                      'future',
6404                                      'mutex',
6405                                      'thread',
6406                                      'chrono',
6407                                      'ratio',
6408                                      'regex',
6409                                      'system_error',
6410                                     ):
6411    error(filename, linenum, 'build/c++11', 5,
6412          ('<%s> is an unapproved C++11 header.') % include.group(1))
6413
6414  # The only place where we need to worry about C++11 keywords and library
6415  # features in preprocessor directives is in macro definitions.
6416  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
6417
6418  # These are classes and free functions.  The classes are always
6419  # mentioned as std::*, but we only catch the free functions if
6420  # they're not found by ADL.  They're alphabetical by header.
6421  for top_name in (
6422      # type_traits
6423      'alignment_of',
6424      'aligned_union',
6425      ):
6426    if Search(r'\bstd::%s\b' % top_name, line):
6427      error(filename, linenum, 'build/c++11', 5,
6428            ('std::%s is an unapproved C++11 class or function.  Send c-style '
6429             'an example of where it would make your code more readable, and '
6430             'they may let you use it.') % top_name)
6431
6432
6433def FlagCxx14Features(filename, clean_lines, linenum, error):
6434  """Flag those C++14 features that we restrict.
6435
6436  Args:
6437    filename: The name of the current file.
6438    clean_lines: A CleansedLines instance containing the file.
6439    linenum: The number of the line to check.
6440    error: The function to call with any errors found.
6441  """
6442  line = clean_lines.elided[linenum]
6443
6444  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
6445
6446  # Flag unapproved C++14 headers.
6447  if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
6448    error(filename, linenum, 'build/c++14', 5,
6449          ('<%s> is an unapproved C++14 header.') % include.group(1))
6450
6451
6452def ProcessFileData(filename, file_extension, lines, error,
6453                    extra_check_functions=None):
6454  """Performs lint checks and reports any errors to the given error function.
6455
6456  Args:
6457    filename: Filename of the file that is being processed.
6458    file_extension: The extension (dot not included) of the file.
6459    lines: An array of strings, each representing a line of the file, with the
6460           last element being empty if the file is terminated with a newline.
6461    error: A callable to which errors are reported, which takes 4 arguments:
6462           filename, line number, error level, and message
6463    extra_check_functions: An array of additional check functions that will be
6464                           run on each source line. Each function takes 4
6465                           arguments: filename, clean_lines, line, error
6466  """
6467  lines = (['// marker so line numbers and indices both start at 1'] + lines +
6468           ['// marker so line numbers end in a known way'])
6469
6470  include_state = _IncludeState()
6471  function_state = _FunctionState()
6472  nesting_state = NestingState()
6473
6474  ResetNolintSuppressions()
6475
6476  CheckForCopyright(filename, lines, error)
6477  ProcessGlobalSuppresions(lines)
6478  RemoveMultiLineComments(filename, lines, error)
6479  clean_lines = CleansedLines(lines)
6480
6481  if IsHeaderExtension(file_extension):
6482    CheckForHeaderGuard(filename, clean_lines, error)
6483
6484  for line in xrange(clean_lines.NumLines()):
6485    ProcessLine(filename, file_extension, clean_lines, line,
6486                include_state, function_state, nesting_state, error,
6487                extra_check_functions)
6488    FlagCxx11Features(filename, clean_lines, line, error)
6489  nesting_state.CheckCompletedBlocks(filename, error)
6490
6491  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
6492
6493  # Check that the .cc file has included its header if it exists.
6494  if _IsSourceExtension(file_extension):
6495    CheckHeaderFileIncluded(filename, include_state, error)
6496
6497  # We check here rather than inside ProcessLine so that we see raw
6498  # lines rather than "cleaned" lines.
6499  CheckForBadCharacters(filename, lines, error)
6500
6501  CheckForNewlineAtEOF(filename, lines, error)
6502
6503def ProcessConfigOverrides(filename):
6504  """ Loads the configuration files and processes the config overrides.
6505
6506  Args:
6507    filename: The name of the file being processed by the linter.
6508
6509  Returns:
6510    False if the current |filename| should not be processed further.
6511  """
6512
6513  abs_filename = os.path.abspath(filename)
6514  cfg_filters = []
6515  keep_looking = True
6516  while keep_looking:
6517    abs_path, base_name = os.path.split(abs_filename)
6518    if not base_name:
6519      break  # Reached the root directory.
6520
6521    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
6522    abs_filename = abs_path
6523    if not os.path.isfile(cfg_file):
6524      continue
6525
6526    try:
6527      with open(cfg_file) as file_handle:
6528        for line in file_handle:
6529          line, _, _ = line.partition('#')  # Remove comments.
6530          if not line.strip():
6531            continue
6532
6533          name, _, val = line.partition('=')
6534          name = name.strip()
6535          val = val.strip()
6536          if name == 'set noparent':
6537            keep_looking = False
6538          elif name == 'filter':
6539            cfg_filters.append(val)
6540          elif name == 'exclude_files':
6541            # When matching exclude_files pattern, use the base_name of
6542            # the current file name or the directory name we are processing.
6543            # For example, if we are checking for lint errors in /foo/bar/baz.cc
6544            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
6545            # file's "exclude_files" filter is meant to be checked against "bar"
6546            # and not "baz" nor "bar/baz.cc".
6547            if base_name:
6548              pattern = re.compile(val)
6549              if pattern.match(base_name):
6550                if _cpplint_state.quiet:
6551                  # Suppress "Ignoring file" warning when using --quiet.
6552                  return False
6553                _cpplint_state.PrintInfo('Ignoring "%s": file excluded by "%s". '
6554                                 'File path component "%s" matches '
6555                                 'pattern "%s"\n' %
6556                                 (filename, cfg_file, base_name, val))
6557                return False
6558          elif name == 'linelength':
6559            global _line_length
6560            try:
6561              _line_length = int(val)
6562            except ValueError:
6563              _cpplint_state.PrintError('Line length must be numeric.')
6564          elif name == 'extensions':
6565            ProcessExtensionsOption(val)
6566          elif name == 'root':
6567            global _root
6568            # root directories are specified relative to CPPLINT.cfg dir.
6569            _root = os.path.join(os.path.dirname(cfg_file), val)
6570          elif name == 'headers':
6571            ProcessHppHeadersOption(val)
6572          elif name == 'includeorder':
6573            ProcessIncludeOrderOption(val)
6574          else:
6575            _cpplint_state.PrintError(
6576                'Invalid configuration option (%s) in file %s\n' %
6577                (name, cfg_file))
6578
6579    except IOError:
6580      _cpplint_state.PrintError(
6581          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
6582      keep_looking = False
6583
6584  # Apply all the accumulated filters in reverse order (top-level directory
6585  # config options having the least priority).
6586  for cfg_filter in reversed(cfg_filters):
6587    _AddFilters(cfg_filter)
6588
6589  return True
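# Editorial sketch of a CPPLINT.cfg file exercising the options parsed above
# (the option names are the ones handled in this function; the values are
# illustrative only):
#   set noparent
#   filter=-build/include_order,+readability/todo
#   exclude_files=third_party
#   linelength=100
#   root=src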
6590
6591
6592def ProcessFile(filename, vlevel, extra_check_functions=None):
6593  """Does google-lint on a single file.
6594
6595  Args:
6596    filename: The name of the file to parse.
6597
6598    vlevel: The level of errors to report.  Every error of confidence
6599    >= verbose_level will be reported.  0 is a good default.
6600
6601    extra_check_functions: An array of additional check functions that will be
6602                           run on each source line. Each function takes 4
6603                           arguments: filename, clean_lines, line, error
6604  """
6605
6606  _SetVerboseLevel(vlevel)
6607  _BackupFilters()
6608  old_errors = _cpplint_state.error_count
6609
6610  if not ProcessConfigOverrides(filename):
6611    _RestoreFilters()
6612    return
6613
6614  lf_lines = []
6615  crlf_lines = []
6616  try:
6617    # Support the UNIX convention of using "-" for stdin.  Note that
6618    # we are not opening the file with universal newline support
6619    # (which codecs doesn't support anyway), so the resulting lines do
6620    # contain trailing '\r' characters if we are reading a file that
6621    # has CRLF endings.
6622    # If after the split a trailing '\r' is present, it is removed
6623    # below.
6624    if filename == '-':
6625      lines = codecs.StreamReaderWriter(sys.stdin,
6626                                        codecs.getreader('utf8'),
6627                                        codecs.getwriter('utf8'),
6628                                        'replace').read().split('\n')
6629    else:
6630      with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
6631        lines = target_file.read().split('\n')
6632
6633    # Remove trailing '\r'.
6634    # The -1 accounts for the extra trailing blank line we get from split()
6635    for linenum in range(len(lines) - 1):
6636      if lines[linenum].endswith('\r'):
6637        lines[linenum] = lines[linenum].rstrip('\r')
6638        crlf_lines.append(linenum + 1)
6639      else:
6640        lf_lines.append(linenum + 1)
6641
6642  except IOError:
6643    _cpplint_state.PrintError(
6644        "Skipping input '%s': Can't open for reading\n" % filename)
6645    _RestoreFilters()
6646    return
6647
6648  # Note, if no dot is found, this will give the entire filename as the ext.
6649  file_extension = filename[filename.rfind('.') + 1:]
6650
6651  # When reading from stdin, the extension is unknown, so no cpplint tests
6652  # should rely on the extension.
6653  if filename != '-' and file_extension not in GetAllExtensions():
6654    _cpplint_state.PrintError('Ignoring %s; not a valid file name '
6655                     '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
6656  else:
6657    ProcessFileData(filename, file_extension, lines, Error,
6658                    extra_check_functions)
6659
6660    # If end-of-line sequences are a mix of LF and CR-LF, issue
6661    # warnings on the lines with CR.
6662    #
6663    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
6664    # since critique can handle these just fine, and the style guide
6665    # doesn't dictate a particular end of line sequence.
6666    #
6667    # We can't depend on os.linesep to determine what the desired
6668    # end-of-line sequence should be, since that will return the
6669    # server-side end-of-line sequence.
6670    if lf_lines and crlf_lines:
6671      # Warn on every line with CR.  An alternative approach might be to
6672      # check whether the file is mostly CRLF or just LF, and warn on the
6673      # minority; we bias toward LF here since most tools prefer LF.
6674      for linenum in crlf_lines:
6675        Error(filename, linenum, 'whitespace/newline', 1,
6676              'Unexpected \\r (^M) found; better to use only \\n')
6677
6678  # Suppress printing anything if --quiet was passed unless the error
6679  # count has increased after processing this file.
6680  if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
6681    _cpplint_state.PrintInfo('Done processing %s\n' % filename)
6682  _RestoreFilters()
6683
6684
6685def PrintUsage(message):
6686  """Prints a brief usage string and exits, optionally with an error message.
6687
6688  Args:
6689    message: The optional error message.
6690  """
6691  sys.stderr.write(_USAGE  % (sorted(list(GetAllExtensions())),
6692       ','.join(sorted(list(GetAllExtensions()))),
6693       sorted(GetHeaderExtensions()),
6694       ','.join(sorted(GetHeaderExtensions()))))
6695
6696  if message:
6697    sys.exit('\nFATAL ERROR: ' + message)
6698  else:
6699    sys.exit(0)
6700
6701def PrintVersion():
6702  sys.stdout.write('Cpplint fork (https://github.com/cpplint/cpplint)\n')
6703  sys.stdout.write('cpplint ' + __VERSION__ + '\n')
6704  sys.stdout.write('Python ' + sys.version + '\n')
6705  sys.exit(0)
6706
6707def PrintCategories():
6708  """Prints a list of all the error-categories used by error messages.
6709
6710  These are the categories used to filter messages via --filter.
6711  """
6712  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
6713  sys.exit(0)
6714
6715
6716def ParseArguments(args):
6717  """Parses the command line arguments.
6718
6719  This may set the output format and verbosity level as side-effects.
6720
6721  Args:
6722    args: The command line arguments.
6723
6724  Returns:
6725    The list of filenames to lint.
6726  """
6727  try:
6728    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
6729                                                 'v=',
6730                                                 'version',
6731                                                 'counting=',
6732                                                 'filter=',
6733                                                 'root=',
6734                                                 'repository=',
6735                                                 'linelength=',
6736                                                 'extensions=',
6737                                                 'exclude=',
6738                                                 'recursive',
6739                                                 'headers=',
6740                                                 'includeorder=',
6741                                                 'quiet'])
6742  except getopt.GetoptError:
6743    PrintUsage('Invalid arguments.')
6744
6745  verbosity = _VerboseLevel()
6746  output_format = _OutputFormat()
6747  filters = ''
6748  quiet = _Quiet()
6749  counting_style = ''
6750  recursive = False
6751
6752  for (opt, val) in opts:
6753    if opt == '--help':
6754      PrintUsage(None)
6755    elif opt == '--version':
6756      PrintVersion()
6757    elif opt == '--output':
6758      if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'):
6759        PrintUsage('The only allowed output formats are emacs, vs7, eclipse, '
6760                   'sed, gsed and junit.')
6761      output_format = val
6762    elif opt == '--quiet':
6763      quiet = True
6764    elif opt == '--verbose' or opt == '--v':
6765      verbosity = int(val)
6766    elif opt == '--filter':
6767      filters = val
6768      if not filters:
6769        PrintCategories()
6770    elif opt == '--counting':
6771      if val not in ('total', 'toplevel', 'detailed'):
6772        PrintUsage('Valid counting options are total, toplevel, and detailed')
6773      counting_style = val
6774    elif opt == '--root':
6775      global _root
6776      _root = val
6777    elif opt == '--repository':
6778      global _repository
6779      _repository = val
6780    elif opt == '--linelength':
6781      global _line_length
6782      try:
6783        _line_length = int(val)
6784      except ValueError:
6785        PrintUsage('Line length must be digits.')
    elif opt == '--exclude':
      global _excludes
      if not _excludes:
        _excludes = set()
      _excludes.update(glob.glob(val))
    elif opt == '--extensions':
      ProcessExtensionsOption(val)
    elif opt == '--headers':
      ProcessHppHeadersOption(val)
    elif opt == '--recursive':
      recursive = True
    elif opt == '--includeorder':
      ProcessIncludeOrderOption(val)

  if not filenames:
    PrintUsage('No files were specified.')

  if recursive:
    filenames = _ExpandDirectories(filenames)

  if _excludes:
    filenames = _FilterExcludedFiles(filenames)

  _SetOutputFormat(output_format)
  _SetQuiet(quiet)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  filenames.sort()
  return filenames

def _ExpandDirectories(filenames):
6819  """Searches a list of filenames and replaces directories in the list with
6820  all files descending from those directories. Files with extensions not in
6821  the valid extensions list are excluded.
6822
6823  Args:
6824    filenames: A list of files or directories
6825
6826  Returns:
6827    A list of all files that are members of filenames or descended from a
6828    directory in filenames
6829  """
  expanded = set()
  for filename in filenames:
    if not os.path.isdir(filename):
      expanded.add(filename)
      continue

    for root, _, files in os.walk(filename):
      for loopfile in files:
        fullname = os.path.join(root, loopfile)
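        # os.walk('.') yields paths such as './src/foo.cc'; strip the leading
        # './' so the collected filenames stay clean.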
        if fullname.startswith('.' + os.path.sep):
          fullname = fullname[len('.' + os.path.sep):]
        expanded.add(fullname)

  filtered = []
  for filename in expanded:
    if os.path.splitext(filename)[1][1:] in GetAllExtensions():
      filtered.append(filename)
  return filtered

def _FilterExcludedFiles(fnames):
6850  """Filters out files listed in the --exclude command line switch. File paths
6851  in the switch are evaluated relative to the current working directory
6852  """
  exclude_paths = [os.path.abspath(f) for f in _excludes]
  # Because globbing does not work recursively, exclude all subpaths of
  # every excluded entry.
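  # For example, excluding 'out' also drops 'out/gen' and 'out/gen/foo.cc',
  # since _IsParentOrSame() matches every path at or below an excluded entry.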
  return [f for f in fnames
          if not any(e for e in exclude_paths
                  if _IsParentOrSame(e, os.path.abspath(f)))]

def _IsParentOrSame(parent, child):
6860  """Return true if child is subdirectory of parent.
6861  Assumes both paths are absolute and don't contain symlinks.
6862  """
  parent = os.path.normpath(parent)
  child = os.path.normpath(child)
  if parent == child:
    return True

  prefix = os.path.commonprefix([parent, child])
  if prefix != parent:
    return False
  # Note: os.path.commonprefix operates on a character basis, so
  # take extra care of situations like '/foo/ba' and '/foo/bar/baz'.
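  # e.g. for parent '/foo/ba' and child '/foo/bar/baz' the prefix check above
  # passes, but os.path.join('/foo/ba', 'r/baz') yields '/foo/ba/r/baz', which
  # differs from the child, so the comparison below rejects the false match.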
  child_suffix = child[len(prefix):]
  child_suffix = child_suffix.lstrip(os.sep)
  return child == os.path.join(prefix, child_suffix)

def main():
  filenames = ParseArguments(sys.argv[1:])
  backup_err = sys.stderr
  try:
    # Change stderr to write with replacement characters so we don't die
    # if we try to print something containing non-ASCII characters.
    sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
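    # Note: codecs.StreamReader does not define write(); attribute lookups
    # (including write) fall through to the wrapped stream via __getattr__.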

    _cpplint_state.ResetErrorCounts()
    for filename in filenames:
      ProcessFile(filename, _cpplint_state.verbose_level)
    # If --quiet is passed, suppress printing error count unless there are errors.
    if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
      _cpplint_state.PrintErrorCounts()

    if _cpplint_state.output_format == 'junit':
      sys.stderr.write(_cpplint_state.FormatJUnitXML())

  finally:
    sys.stderr = backup_err

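  # Exit status is 1 if any style error was reported, 0 otherwise (the boolean
  # passed to sys.exit() maps to 1/0).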
  sys.exit(_cpplint_state.error_count > 0)


if __name__ == '__main__':
  main()