//===--- Lexer.h - C Language Family Lexer ----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines the Lexer interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LEXER_H
#define LLVM_CLANG_LEXER_H

#include "clang/Lex/PreprocessorLexer.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/SmallVector.h"
#include <string>
#include <cassert>

namespace clang {
class DiagnosticsEngine;
class SourceManager;
class Preprocessor;
class DiagnosticBuilder;

/// ConflictMarkerKind - Kinds of conflict marker which the lexer might be
/// recovering from.
enum ConflictMarkerKind {
  /// Not within a conflict marker.
  CMK_None,
  /// A normal or diff3 conflict marker, initiated by at least 7 "<"s,
  /// separated by at least 7 "="s or "|"s, and terminated by at least 7 ">"s.
  CMK_Normal,
  /// A Perforce-style conflict marker, initiated by 4 ">"s, separated by
  /// 4 "="s, and terminated by 4 "<"s.
  CMK_Perforce
};

/// Lexer - This provides a simple interface that turns a text buffer into a
/// stream of tokens.  This provides no support for file reading or buffering,
/// or buffering/seeking of tokens; only forward lexing is supported.  It
/// relies on the specified Preprocessor object to handle preprocessor
/// directives, etc.
class Lexer : public PreprocessorLexer {
  virtual void anchor();

  //===--------------------------------------------------------------------===//
  // Constant configuration values for this lexer.
  const char *BufferStart;       // Start of the buffer.
  const char *BufferEnd;         // End of the buffer.
  SourceLocation FileLoc;        // Location for start of file.
  LangOptions LangOpts;          // LangOpts enabled by this language (cache).
  bool Is_PragmaLexer;           // True if lexer for _Pragma handling.

  //===--------------------------------------------------------------------===//
  // Context-specific lexing flags set by the preprocessor.
  //

  /// ExtendedTokenMode - The lexer can optionally keep comments and whitespace
  /// and return them as tokens.  This is used for -C and -CC modes, and
  /// whitespace preservation can be useful for some clients that want to lex
  /// the file in raw mode and get every character from the file.
  ///
  /// When this is set to 2 it returns comments and whitespace; when set to 1
  /// it returns comments; when set to 0 it returns normal tokens only.
  unsigned char ExtendedTokenMode;

  //===--------------------------------------------------------------------===//
  // Context that changes as the file is lexed.
  // NOTE: any state that mutates when in raw mode must have save/restore code
  // in Lexer::isNextPPTokenLParen.

  // BufferPtr - Current pointer into the buffer.  This is the next character
  // to be lexed.
  const char *BufferPtr;

  // IsAtStartOfLine - True if the next lexed token should get the "start of
  // line" flag set on it.
  bool IsAtStartOfLine;

  // CurrentConflictMarkerState - The kind of conflict marker we are handling.
  ConflictMarkerKind CurrentConflictMarkerState;

  Lexer(const Lexer&);          // DO NOT IMPLEMENT
  void operator=(const Lexer&); // DO NOT IMPLEMENT
  friend class Preprocessor;

  void InitLexer(const char *BufStart, const char *BufPtr, const char *BufEnd);
public:

  /// Lexer constructor - Create a new lexer object for the specified buffer
  /// with the specified preprocessor managing the lexing process.  This lexer
  /// assumes that the associated file buffer and Preprocessor objects will
  /// outlive it, so it doesn't take ownership of either of them.
  Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer, Preprocessor &PP);

  /// Lexer constructor - Create a new raw lexer object.  This object is only
  /// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
  /// range will outlive it, so it doesn't take ownership of it.
  Lexer(SourceLocation FileLoc, const LangOptions &LangOpts,
        const char *BufStart, const char *BufPtr, const char *BufEnd);

  /// Lexer constructor - Create a new raw lexer object.  This object is only
  /// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
  /// range will outlive it, so it doesn't take ownership of it.
  Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer,
        const SourceManager &SM, const LangOptions &LangOpts);

  /// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
  /// _Pragma expansion.  This has a variety of magic semantics that this
  /// method sets up.  It returns a new'd Lexer that must be delete'd when
  /// done.
  static Lexer *Create_PragmaLexer(SourceLocation SpellingLoc,
                                   SourceLocation ExpansionLocStart,
                                   SourceLocation ExpansionLocEnd,
                                   unsigned TokLen, Preprocessor &PP);


  /// getLangOpts - Return the language features currently enabled.
  /// NOTE: this lexer modifies features as a file is parsed!
  const LangOptions &getLangOpts() const { return LangOpts; }

  /// getFileLoc - Return the File Location for the file we are lexing out of.
  /// The physical location encodes the location where the characters come
  /// from; the virtual location encodes where we should *claim* the
  /// characters came from.  Currently this is only used by _Pragma handling.
  SourceLocation getFileLoc() const { return FileLoc; }

  /// Lex - Return the next token in the file.  If this is the end of file, it
  /// returns the tok::eof token.  This implicitly involves the preprocessor.
  void Lex(Token &Result) {
    // Start a new token.
    Result.startToken();

    // NOTE: any changes here should also change code after calls to
    // Preprocessor::HandleDirective.
    if (IsAtStartOfLine) {
      Result.setFlag(Token::StartOfLine);
      IsAtStartOfLine = false;
    }

    // Get a token.  Note that this may delete the current lexer if the end of
    // file is reached.
    LexTokenInternal(Result);
  }

  /// isPragmaLexer - Returns true if this Lexer is being used to lex a pragma.
  bool isPragmaLexer() const { return Is_PragmaLexer; }

  /// IndirectLex - An indirect call to 'Lex' that can be invoked via
  /// the PreprocessorLexer interface.
  void IndirectLex(Token &Result) { Lex(Result); }
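
  // Illustrative sketch (not part of the interface): a client can lex a
  // memory buffer without a Preprocessor by combining one of the raw-lexer
  // constructors above with LexFromRawLexer() below.  The FileLoc, LangOpts
  // and Buf values are assumed to be supplied by the caller.
  //
  //   Lexer RawLex(FileLoc, LangOpts, Buf->getBufferStart(),
  //                Buf->getBufferStart(), Buf->getBufferEnd());
  //   Token Tok;
  //   do {
  //     RawLex.LexFromRawLexer(Tok);
  //     // ... inspect Tok ...
  //   } while (Tok.isNot(tok::eof));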

  /// LexFromRawLexer - Lex a token from a designated raw lexer (one with no
  /// associated preprocessor object).  Return true if the 'next character to
  /// read' pointer points at the end of the lexer buffer, false otherwise.
  bool LexFromRawLexer(Token &Result) {
    assert(LexingRawMode && "Not already in raw mode!");
    Lex(Result);
    // Note that lexing to the end of the buffer doesn't implicitly delete the
    // lexer when in raw mode.
    return BufferPtr == BufferEnd;
  }

  /// isKeepWhitespaceMode - Return true if the lexer should return tokens for
  /// every character in the file, including whitespace and comments.  This
  /// should only be used in raw mode, as the preprocessor is not prepared to
  /// deal with the excess tokens.
  bool isKeepWhitespaceMode() const {
    return ExtendedTokenMode > 1;
  }

  /// SetKeepWhitespaceMode - This method lets clients enable or disable
  /// whitespace retention mode.
  void SetKeepWhitespaceMode(bool Val) {
    assert((!Val || LexingRawMode) &&
           "Can only enable whitespace retention in raw mode");
    ExtendedTokenMode = Val ? 2 : 0;
  }

  /// inKeepCommentMode - Return true if the lexer should return comments as
  /// tokens.
  bool inKeepCommentMode() const {
    return ExtendedTokenMode > 0;
  }

  /// SetCommentRetentionState - Change the comment retention mode of the
  /// lexer to the specified mode.  This is really only useful when lexing in
  /// raw mode, because otherwise the lexer needs to manage this.
  void SetCommentRetentionState(bool Mode) {
    assert(!isKeepWhitespaceMode() &&
           "Can't play with comment retention state when retaining whitespace");
    ExtendedTokenMode = Mode ? 1 : 0;
  }

  const char *getBufferStart() const { return BufferStart; }

  /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
  /// uninterpreted string.  This switches the lexer out of directive mode.
  std::string ReadToEndOfLine();


  /// Diag - Forwarding function for diagnostics.  This translates a source
  /// position in the current buffer into a SourceLocation object for
  /// rendering.
  DiagnosticBuilder Diag(const char *Loc, unsigned DiagID) const;

  /// getSourceLocation - Return a source location identifier for the specified
  /// offset in the current file.
  SourceLocation getSourceLocation(const char *Loc, unsigned TokLen = 1) const;

  /// getSourceLocation - Return a source location for the next character in
  /// the current file.
  SourceLocation getSourceLocation() { return getSourceLocation(BufferPtr); }

  /// \brief Return the current location in the buffer.
  const char *getBufferLocation() const { return BufferPtr; }

  /// Stringify - Convert the specified string into a C string by escaping '\'
  /// and " characters.  This does not add surrounding ""'s to the string.
  /// If Charify is true, this escapes the ' character instead of ".
  static std::string Stringify(const std::string &Str, bool Charify = false);

  /// Stringify - Convert the specified string into a C string by escaping '\'
  /// and " characters.  This does not add surrounding ""'s to the string.
  static void Stringify(SmallVectorImpl<char> &Str);
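
  // Illustrative sketch of the buffer-based getSpelling() overload declared
  // below.  The caller preallocates at least Tok.getLength() bytes; the call
  // may either fill that buffer or redirect the pointer into a constant
  // buffer.  Tok, SM and LangOpts are assumed to be in scope; SmallString
  // requires "llvm/ADT/SmallString.h".
  //
  //   llvm::SmallString<64> Storage;
  //   Storage.resize(Tok.getLength());
  //   const char *Ptr = Storage.data();
  //   unsigned Len = Lexer::getSpelling(Tok, Ptr, SM, LangOpts);
  //   StringRef Spelling(Ptr, Len);  // Ptr may no longer point into Storage.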

  /// getSpelling - This method is used to get the spelling of a token into a
  /// preallocated buffer, instead of as an std::string.  The caller is
  /// required to allocate enough space for the token, which is guaranteed to
  /// be at least Tok.getLength() bytes long.  The length of the actual result
  /// is returned.
  ///
  /// Note that this method may do two possible things: it may either fill in
  /// the buffer specified with characters, or it may *change the input
  /// pointer* to point to a constant buffer with the data already in it
  /// (avoiding a copy).  The caller is not allowed to modify the returned
  /// buffer pointer if an internal buffer is returned.
  static unsigned getSpelling(const Token &Tok, const char *&Buffer,
                              const SourceManager &SourceMgr,
                              const LangOptions &LangOpts,
                              bool *Invalid = 0);

  /// getSpelling() - Return the 'spelling' of the Tok token.  The spelling of
  /// a token is the characters used to represent the token in the source file
  /// after trigraph expansion and escaped-newline folding.  In particular,
  /// this wants to get the true, uncanonicalized spelling of things like
  /// digraphs, UCNs, etc.
  static std::string getSpelling(const Token &Tok,
                                 const SourceManager &SourceMgr,
                                 const LangOptions &LangOpts,
                                 bool *Invalid = 0);

  /// getSpelling - This method is used to get the spelling of the
  /// token at the given source location.  If, as is usually true, it
  /// is not necessary to copy any data, then the returned string might
  /// not point into the provided buffer.
  ///
  /// This method lexes at the expansion depth of the given
  /// location and does not jump to the expansion or spelling
  /// location.
  static StringRef getSpelling(SourceLocation loc,
                               SmallVectorImpl<char> &buffer,
                               const SourceManager &SourceMgr,
                               const LangOptions &LangOpts,
                               bool *invalid = 0);

  /// MeasureTokenLength - Relex the token at the specified location and
  /// return its length in bytes in the input file.  If the token needs
  /// cleaning (e.g. includes a trigraph or an escaped newline) then this
  /// count includes bytes that are part of that.
  static unsigned MeasureTokenLength(SourceLocation Loc,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts);

  /// \brief Given a location anywhere in a source buffer, find the location
  /// that corresponds to the beginning of the token in which the original
  /// source location lands.
  ///
  /// \param Loc The source location that lands somewhere inside the token of
  /// interest.
  static SourceLocation GetBeginningOfToken(SourceLocation Loc,
                                            const SourceManager &SM,
                                            const LangOptions &LangOpts);

  /// AdvanceToTokenCharacter - If the current SourceLocation specifies a
  /// location at the start of a token, return a new location that specifies a
  /// character within the token.  This handles trigraphs and escaped newlines.
  static SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,
                                                unsigned Character,
                                                const SourceManager &SM,
                                                const LangOptions &LangOpts);
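
  // Illustrative sketch of the common fix-it pattern built on
  // getLocForEndOfToken() (declared below): compute a location just past a
  // token so a diagnostic can point or insert there.  PrevTokLoc, SM,
  // LangOpts, Diag and SomeDiagID are hypothetical names standing in for
  // whatever the client already has.
  //
  //   SourceLocation InsertLoc =
  //       Lexer::getLocForEndOfToken(PrevTokLoc, /*Offset=*/0, SM, LangOpts);
  //   if (InsertLoc.isValid())
  //     Diag(InsertLoc, SomeDiagID)
  //         << FixItHint::CreateInsertion(InsertLoc, ";");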

  /// \brief Computes the source location just past the end of the
  /// token at this source location.
  ///
  /// This routine can be used to produce a source location that
  /// points just past the end of the token referenced by \p Loc, and
  /// is generally used when a diagnostic needs to point just after a
  /// token where it expected something different from what it received.
  /// If the returned source location would not be meaningful (e.g., if
  /// it points into a macro), this routine returns an invalid
  /// source location.
  ///
  /// \param Offset an offset from the end of the token, where the source
  /// location should refer to.  The default offset (0) produces a source
  /// location pointing just past the end of the token; an offset of 1
  /// produces a source location pointing to the last character in the token,
  /// etc.
  static SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
                                            const SourceManager &SM,
                                            const LangOptions &LangOpts);

  /// \brief Returns true if the given MacroID location points at the first
  /// token of the macro expansion.
  ///
  /// \param MacroBegin If non-null and the function returns true, it is set
  /// to the begin location of the macro.
  static bool isAtStartOfMacroExpansion(SourceLocation loc,
                                        const SourceManager &SM,
                                        const LangOptions &LangOpts,
                                        SourceLocation *MacroBegin = 0);

  /// \brief Returns true if the given MacroID location points at the last
  /// token of the macro expansion.
  ///
  /// \param MacroEnd If non-null and the function returns true, it is set to
  /// the end location of the macro.
  static bool isAtEndOfMacroExpansion(SourceLocation loc,
                                      const SourceManager &SM,
                                      const LangOptions &LangOpts,
                                      SourceLocation *MacroEnd = 0);

  /// \brief Accepts a range and returns a character range with file locations.
  ///
  /// Returns a null range if a part of the range resides inside a macro
  /// expansion or the range does not reside within the same FileID.
  static CharSourceRange makeFileCharRange(CharSourceRange Range,
                                           const SourceManager &SM,
                                           const LangOptions &LangOpts);

  /// \brief Returns a string for the source that the range encompasses.
  static StringRef getSourceText(CharSourceRange Range,
                                 const SourceManager &SM,
                                 const LangOptions &LangOpts,
                                 bool *Invalid = 0);

  /// \brief Retrieve the name of the immediate macro expansion.
  ///
  /// This routine starts from a source location, and finds the name of the
  /// macro responsible for its immediate expansion.  It looks through any
  /// intervening macro argument expansions to compute this.  It returns a
  /// StringRef which refers to the SourceManager-owned buffer of the source
  /// where that macro name is spelled.  Thus, the result shouldn't outlive
  /// that SourceManager.
  static StringRef getImmediateMacroName(SourceLocation Loc,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts);
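
  // Illustrative sketch of recovering the text behind an AST node's range
  // with getSourceText() above.  'S' is a hypothetical Stmt pointer; SM and
  // LangOpts are assumed to be in scope.
  //
  //   CharSourceRange Range =
  //       CharSourceRange::getTokenRange(S->getSourceRange());
  //   StringRef Text = Lexer::getSourceText(Range, SM, LangOpts);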

  /// \brief Compute the preamble of the given file.
  ///
  /// The preamble of a file contains the initial comments, include directives,
  /// and other preprocessor directives that occur before the code in this
  /// particular file actually begins.  The preamble of the main source file is
  /// a potential prefix header.
  ///
  /// \param Buffer The memory buffer containing the file's contents.
  ///
  /// \param MaxLines If non-zero, restrict the length of the preamble
  /// to fewer than this number of lines.
  ///
  /// \returns The offset into the file where the preamble ends and the rest
  /// of the file begins, along with a boolean value indicating whether
  /// the preamble ends at the beginning of a new line.
  static std::pair<unsigned, bool>
  ComputePreamble(const llvm::MemoryBuffer *Buffer, const LangOptions &LangOpts,
                  unsigned MaxLines = 0);

  //===--------------------------------------------------------------------===//
  // Internal implementation interfaces.
private:

  /// LexTokenInternal - Internal interface to lex a preprocessing token.
  /// Called by Lex.
  ///
  void LexTokenInternal(Token &Result);

  /// FormTokenWithChars - When we lex a token, we have identified a span
  /// starting at BufferPtr, going to TokEnd that forms the token.  This method
  /// takes that range and assigns it to the token as its location and size.
  /// In addition, since tokens cannot overlap, this also updates BufferPtr to
  /// be TokEnd.
  void FormTokenWithChars(Token &Result, const char *TokEnd,
                          tok::TokenKind Kind) {
    unsigned TokLen = TokEnd-BufferPtr;
    Result.setLength(TokLen);
    Result.setLocation(getSourceLocation(BufferPtr, TokLen));
    Result.setKind(Kind);
    BufferPtr = TokEnd;
  }

  /// isNextPPTokenLParen - Return 1 if the next unexpanded token will return a
  /// tok::l_paren token, 0 if it is something else, and 2 if there are no more
  /// tokens in the buffer controlled by this lexer.
  unsigned isNextPPTokenLParen();

  //===--------------------------------------------------------------------===//
  // Lexer character reading interfaces.
public:

  // This lexer is built on two interfaces for reading characters, both of
  // which automatically provide phase 1/2 translation.  getAndAdvanceChar is
  // used when we know that we will be reading a character from the input
  // buffer and that this character will be part of the result token.  This
  // occurs in (e.g.) string processing, because we know we need to read until
  // we find the closing '"' character.
  //
  // The second interface is the combination of getCharAndSize with
  // ConsumeChar.  getCharAndSize reads a phase 1/2 translated character,
  // returning it and its size.  If the lexer decides that this character is
  // part of the current token, it calls ConsumeChar on it.  This two-stage
  // approach allows us to emit diagnostics for characters (e.g. warnings about
  // trigraphs), knowing that they are only emitted if the character is
  // consumed.

  /// isObviouslySimpleCharacter - Return true if the specified character is
  /// obviously the same in translation phase 1 and translation phase 3.  This
  /// can return false for characters that end up being the same, but it will
  /// never return true for something that needs to be mapped.
  static bool isObviouslySimpleCharacter(char C) {
    return C != '?' && C != '\\';
  }
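
  // Illustrative sketch of the two-stage getCharAndSize()/ConsumeChar()
  // protocol described above, as one of the private Lex* helpers might use
  // it.  CurPtr and Result are hypothetical locals of such a helper.
  //
  //   unsigned Size;
  //   char C = getCharAndSize(CurPtr, Size);    // Peek (phase 1/2 translated).
  //   if (C == '\'') {
  //     CurPtr = ConsumeChar(CurPtr, Size, Result);  // Commit; any trigraph
  //                                                  // diagnostics fire here.
  //     // ... lex the rest of the character constant ...
  //   }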

  /// getAndAdvanceChar - Read a single 'character' from the specified buffer,
  /// advance over it, and return it.  This is tricky in several cases.  Here
  /// we just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getAndAdvanceChar(const char *&Ptr, Token &Tok) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;

    unsigned Size = 0;
    char C = getCharAndSizeSlow(Ptr, Size, &Tok);
    Ptr += Size;
    return C;
  }

private:
  /// ConsumeChar - When a character (identified by getCharAndSize) is consumed
  /// and added to a given token, check to see if there are diagnostics that
  /// need to be emitted or flags that need to be set on the token.  If so, do
  /// it.
  const char *ConsumeChar(const char *Ptr, unsigned Size, Token &Tok) {
    // Normal case, we consumed exactly one character.  Just return it.
    if (Size == 1)
      return Ptr+Size;

    // Otherwise, re-lex the character with the current token, allowing
    // diagnostics to be emitted and flags to be set.
    Size = 0;
    getCharAndSizeSlow(Ptr, Size, &Tok);
    return Ptr+Size;
  }

  /// getCharAndSize - Peek a single 'character' from the specified buffer,
  /// get its size, and return it.  This is tricky in several cases.  Here we
  /// just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getCharAndSize(const char *Ptr, unsigned &Size) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlow(Ptr, Size);
  }

  /// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
  /// method.
  char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);
public:

  /// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
  /// emit a warning.
  static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
                                          const LangOptions &LangOpts) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
  }

  /// getEscapedNewLineSize - Return the size of the specified escaped newline,
  /// or 0 if it is not an escaped newline.  P[-1] is known to be a "\" on
  /// entry to this function.
  static unsigned getEscapedNewLineSize(const char *P);

  /// SkipEscapedNewLines - If P points to an escaped newline (or a series of
  /// them), skip over them and return the first non-escaped-newline found,
  /// otherwise return P.
  static const char *SkipEscapedNewLines(const char *P);

  /// \brief Checks that the given token is the first token that occurs after
  /// the given location (this excludes comments and whitespace).  Returns the
  /// location immediately after the specified token.  If the token is not
  /// found or the location is inside a macro, the returned source location
  /// will be invalid.
  static SourceLocation findLocationAfterToken(SourceLocation loc,
                                        tok::TokenKind TKind,
                                        const SourceManager &SM,
                                        const LangOptions &LangOpts,
                                        bool SkipTrailingWhitespaceAndNewLine);
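
  // Illustrative sketch of findLocationAfterToken() above: find the location
  // just past the semicolon that follows some statement, e.g. to insert text
  // after it.  EndLoc, SM and LangOpts are assumed to come from the caller.
  //
  //   SourceLocation AfterSemi = Lexer::findLocationAfterToken(
  //       EndLoc, tok::semi, SM, LangOpts,
  //       /*SkipTrailingWhitespaceAndNewLine=*/true);
  //   if (AfterSemi.isValid())
  //     // ... use AfterSemi ...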

private:

  /// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
  /// diagnostic.
  static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                       const LangOptions &LangOpts);

  //===--------------------------------------------------------------------===//
  // Other lexer functions.

  void SkipBytes(unsigned Bytes, bool StartOfLine);

  const char *LexUDSuffix(Token &Result, const char *CurPtr);

  // Helper functions to lex the remainder of a token of the specific type.
  void LexIdentifier         (Token &Result, const char *CurPtr);
  void LexNumericConstant    (Token &Result, const char *CurPtr);
  void LexStringLiteral      (Token &Result, const char *CurPtr,
                              tok::TokenKind Kind);
  void LexRawStringLiteral   (Token &Result, const char *CurPtr,
                              tok::TokenKind Kind);
  void LexAngledStringLiteral(Token &Result, const char *CurPtr);
  void LexCharConstant       (Token &Result, const char *CurPtr,
                              tok::TokenKind Kind);
  bool LexEndOfFile          (Token &Result, const char *CurPtr);

  bool SkipWhitespace        (Token &Result, const char *CurPtr);
  bool SkipBCPLComment       (Token &Result, const char *CurPtr);
  bool SkipBlockComment      (Token &Result, const char *CurPtr);
  bool SaveBCPLComment       (Token &Result, const char *CurPtr);

  bool IsStartOfConflictMarker(const char *CurPtr);
  bool HandleEndOfConflictMarker(const char *CurPtr);

  bool isCodeCompletionPoint(const char *CurPtr) const;
  void cutOffLexing() { BufferPtr = BufferEnd; }
};


}  // end namespace clang

#endif