//===--- FormatTokenLexer.cpp - Lex FormatTokens -------------*- C++ ----*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements FormatTokenLexer, which tokenizes a source file
/// into a FormatToken stream suitable for ClangFormat.
///
//===----------------------------------------------------------------------===//

#include "FormatTokenLexer.h"
#include "FormatToken.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"

namespace clang {
namespace format {

FormatTokenLexer::FormatTokenLexer(
    const SourceManager &SourceMgr, FileID ID, unsigned Column,
    const FormatStyle &Style, encoding::Encoding Encoding,
    llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
    IdentifierTable &IdentTable)
    : FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
      Column(Column), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
      Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
      Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
      FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
      MacroBlockEndRegex(Style.MacroBlockEnd) {
  Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr,
                      getFormattingLangOpts(Style)));
  Lex->SetKeepWhitespaceMode(true);

  for (const std::string &ForEachMacro : Style.ForEachMacros)
    Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
  for (const std::string &AttributeMacro : Style.AttributeMacros)
    Macros.insert({&IdentTable.get(AttributeMacro), TT_AttributeMacro});
  for (const std::string &StatementMacro : Style.StatementMacros)
    Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
  for (const std::string &TypenameMacro : Style.TypenameMacros)
    Macros.insert({&IdentTable.get(TypenameMacro), TT_TypenameMacro});
  for (const std::string &NamespaceMacro : Style.NamespaceMacros)
    Macros.insert({&IdentTable.get(NamespaceMacro), TT_NamespaceMacro});
  for (const std::string &WhitespaceSensitiveMacro :
       Style.WhitespaceSensitiveMacros) {
    Macros.insert(
        {&IdentTable.get(WhitespaceSensitiveMacro), TT_UntouchableMacroFunc});
  }
}

ArrayRef<FormatToken *> FormatTokenLexer::lex() {
  assert(Tokens.empty());
  assert(FirstInLineIndex == 0);
  do {
    Tokens.push_back(getNextToken());
    if (Style.Language == FormatStyle::LK_JavaScript) {
      tryParseJSRegexLiteral();
      handleTemplateStrings();
    }
    if (Style.Language == FormatStyle::LK_TextProto)
      tryParsePythonComment();
    tryMergePreviousTokens();
    if (Style.isCSharp())
      // This needs to come after tokens have been merged so that C#
      // string literals are correctly identified.
      handleCSharpVerbatimAndInterpolatedStrings();
    if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
      FirstInLineIndex = Tokens.size() - 1;
  } while (Tokens.back()->Tok.isNot(tok::eof));
  return Tokens;
}

void FormatTokenLexer::tryMergePreviousTokens() {
  if (tryMerge_TMacro())
    return;
  if (tryMergeConflictMarkers())
    return;
  if (tryMergeLessLess())
    return;
  if (tryMergeForEach())
    return;
  if (Style.isCpp() && tryTransformTryUsageForC())
    return;

  if (Style.isCSharp()) {
    if (tryMergeCSharpKeywordVariables())
      return;
    if (tryMergeCSharpStringLiteral())
      return;
    if (tryMergeCSharpDoubleQuestion())
      return;
    if (tryMergeCSharpNullConditional())
      return;
    if (tryTransformCSharpForEach())
      return;
    static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
    if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
      return;
  }

  if (tryMergeNSStringLiteral())
    return;

  if (Style.Language == FormatStyle::LK_JavaScript) {
    static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
    static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
                                                   tok::equal};
    static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
                                                  tok::greaterequal};
    static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
    static const tok::TokenKind JSExponentiation[] = {tok::star, tok::star};
    static const tok::TokenKind JSExponentiationEqual[] = {tok::star,
                                                           tok::starequal};
    static const tok::TokenKind JSNullPropagatingOperator[] = {tok::question,
                                                               tok::period};
    static const tok::TokenKind JSNullishOperator[] = {tok::question,
                                                       tok::question};
    static const tok::TokenKind JSNullishEqual[] = {tok::question,
                                                    tok::question, tok::equal};
    static const tok::TokenKind JSPipePipeEqual[] = {tok::pipepipe, tok::equal};
    static const tok::TokenKind JSAndAndEqual[] = {tok::ampamp, tok::equal};

    // FIXME: Investigate what token type gives the correct operator priority.
    if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
      return;
    if (tryMergeTokens(JSExponentiation, TT_JsExponentiation))
      return;
    if (tryMergeTokens(JSExponentiationEqual, TT_JsExponentiationEqual)) {
      Tokens.back()->Tok.setKind(tok::starequal);
      return;
    }
    if (tryMergeTokens(JSNullishOperator, TT_JsNullishCoalescingOperator)) {
      // Treat like the "||" operator (as opposed to the ternary ?).
      Tokens.back()->Tok.setKind(tok::pipepipe);
      return;
    }
    if (tryMergeTokens(JSNullPropagatingOperator,
                       TT_JsNullPropagatingOperator)) {
      // Treat like a regular "." access.
      Tokens.back()->Tok.setKind(tok::period);
      return;
    }
    if (tryMergeTokens(JSAndAndEqual, TT_JsAndAndEqual) ||
        tryMergeTokens(JSPipePipeEqual, TT_JsPipePipeEqual) ||
        tryMergeTokens(JSNullishEqual, TT_JsNullishCoalescingEqual)) {
      // Treat like the "=" assignment operator.
      Tokens.back()->Tok.setKind(tok::equal);
      return;
    }
    if (tryMergeJSPrivateIdentifier())
      return;
  }

  if (Style.Language == FormatStyle::LK_Java) {
    static const tok::TokenKind JavaRightLogicalShiftAssign[] = {
        tok::greater, tok::greater, tok::greaterequal};
    if (tryMergeTokens(JavaRightLogicalShiftAssign, TT_BinaryOperator))
      return;
  }
}

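// Merges an Objective-C '@' with the following string literal into a single
// TT_ObjCStringLiteral token.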
bool FormatTokenLexer::tryMergeNSStringLiteral() {
  if (Tokens.size() < 2)
    return false;
  auto &At = *(Tokens.end() - 2);
  auto &String = *(Tokens.end() - 1);
  if (!At->is(tok::at) || !String->is(tok::string_literal))
    return false;
  At->Tok.setKind(tok::string_literal);
  At->TokenText = StringRef(At->TokenText.begin(),
                            String->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += String->ColumnWidth;
  At->setType(TT_ObjCStringLiteral);
  Tokens.erase(Tokens.end() - 1);
  return true;
}

bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
  // Merges '#identifier' into a single identifier with the text #identifier
  // but the token kind tok::identifier.
  if (Tokens.size() < 2)
    return false;
  auto &Hash = *(Tokens.end() - 2);
  auto &Identifier = *(Tokens.end() - 1);
  if (!Hash->is(tok::hash) || !Identifier->is(tok::identifier))
    return false;
  Hash->Tok.setKind(tok::identifier);
  Hash->TokenText =
      StringRef(Hash->TokenText.begin(),
                Identifier->TokenText.end() - Hash->TokenText.begin());
  Hash->ColumnWidth += Identifier->ColumnWidth;
  Hash->setType(TT_JsPrivateIdentifier);
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// Search for verbatim or interpolated string literals @"ABC" or
// $"aaaaa{abc}aaaaa" and mark the token as TT_CSharpStringLiteral to prevent
// splitting of @, $ and ".
// Merging of multiline verbatim strings with embedded '"' is handled in
// handleCSharpVerbatimAndInterpolatedStrings with lower-level lexing.
bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
  if (Tokens.size() < 2)
    return false;

  // Interpolated strings could contain { } with " characters inside.
  // $"{x ?? "null"}"
  // should not be split into $"{x ?? ", null, "}" but should be treated as a
  // single string-literal.
  //
  // We opt not to try and format expressions inside {} within a C#
  // interpolated string. Formatting expressions within an interpolated string
  // would require similar work as that done for JavaScript template strings
  // in `handleTemplateStrings()`.
  auto &CSharpInterpolatedString = *(Tokens.end() - 2);
  if (CSharpInterpolatedString->getType() == TT_CSharpStringLiteral &&
      (CSharpInterpolatedString->TokenText.startswith(R"($")") ||
       CSharpInterpolatedString->TokenText.startswith(R"($@")"))) {
    int UnmatchedOpeningBraceCount = 0;

    auto TokenTextSize = CSharpInterpolatedString->TokenText.size();
    for (size_t Index = 0; Index < TokenTextSize; ++Index) {
      char C = CSharpInterpolatedString->TokenText[Index];
      if (C == '{') {
        // "{{" inside an interpolated string is an escaped '{' so skip it.
        if (Index + 1 < TokenTextSize &&
            CSharpInterpolatedString->TokenText[Index + 1] == '{') {
          ++Index;
          continue;
        }
        ++UnmatchedOpeningBraceCount;
      } else if (C == '}') {
        // "}}" inside an interpolated string is an escaped '}' so skip it.
        if (Index + 1 < TokenTextSize &&
            CSharpInterpolatedString->TokenText[Index + 1] == '}') {
          ++Index;
          continue;
        }
        --UnmatchedOpeningBraceCount;
      }
    }

    if (UnmatchedOpeningBraceCount > 0) {
      auto &NextToken = *(Tokens.end() - 1);
      CSharpInterpolatedString->TokenText =
          StringRef(CSharpInterpolatedString->TokenText.begin(),
                    NextToken->TokenText.end() -
                        CSharpInterpolatedString->TokenText.begin());
      CSharpInterpolatedString->ColumnWidth += NextToken->ColumnWidth;
      Tokens.erase(Tokens.end() - 1);
      return true;
    }
  }

  // Look for @"aaaaaa" or $"aaaaaa".
  auto &String = *(Tokens.end() - 1);
  if (!String->is(tok::string_literal))
    return false;

  auto &At = *(Tokens.end() - 2);
  if (!(At->is(tok::at) || At->TokenText == "$"))
    return false;

  if (Tokens.size() > 2 && At->is(tok::at)) {
    auto &Dollar = *(Tokens.end() - 3);
    if (Dollar->TokenText == "$") {
      // This looks like $@"aaaaa" so we need to combine all 3 tokens.
      Dollar->Tok.setKind(tok::string_literal);
      Dollar->TokenText =
          StringRef(Dollar->TokenText.begin(),
                    String->TokenText.end() - Dollar->TokenText.begin());
      Dollar->ColumnWidth += (At->ColumnWidth + String->ColumnWidth);
      Dollar->setType(TT_CSharpStringLiteral);
      Tokens.erase(Tokens.end() - 2);
      Tokens.erase(Tokens.end() - 1);
      return true;
    }
  }

  // Convert back into just a string_literal.
  At->Tok.setKind(tok::string_literal);
  At->TokenText = StringRef(At->TokenText.begin(),
                            String->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += String->ColumnWidth;
  At->setType(TT_CSharpStringLiteral);
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// Valid C# attribute targets:
// https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/concepts/attributes/#attribute-targets
const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
    "assembly", "module",   "field",  "event", "method",
    "param",    "property", "return", "type",
};

bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
  if (Tokens.size() < 2)
    return false;
  auto &FirstQuestion = *(Tokens.end() - 2);
  auto &SecondQuestion = *(Tokens.end() - 1);
  if (!FirstQuestion->is(tok::question) || !SecondQuestion->is(tok::question))
    return false;
  FirstQuestion->Tok.setKind(tok::question); // no '??' in clang tokens.
  FirstQuestion->TokenText = StringRef(FirstQuestion->TokenText.begin(),
                                       SecondQuestion->TokenText.end() -
                                           FirstQuestion->TokenText.begin());
  FirstQuestion->ColumnWidth += SecondQuestion->ColumnWidth;
  FirstQuestion->setType(TT_CSharpNullCoalescing);
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// Merge '?[' and '?.' pairs into single tokens.
bool FormatTokenLexer::tryMergeCSharpNullConditional() {
  if (Tokens.size() < 2)
    return false;
  auto &Question = *(Tokens.end() - 2);
  auto &PeriodOrLSquare = *(Tokens.end() - 1);
  if (!Question->is(tok::question) ||
      !PeriodOrLSquare->isOneOf(tok::l_square, tok::period))
    return false;
  Question->TokenText =
      StringRef(Question->TokenText.begin(),
                PeriodOrLSquare->TokenText.end() - Question->TokenText.begin());
  Question->ColumnWidth += PeriodOrLSquare->ColumnWidth;

  if (PeriodOrLSquare->is(tok::l_square)) {
    Question->Tok.setKind(tok::question); // no '?[' in clang tokens.
    Question->setType(TT_CSharpNullConditionalLSquare);
  } else {
    Question->Tok.setKind(tok::question); // no '?.' in clang tokens.
    Question->setType(TT_CSharpNullConditional);
  }

  Tokens.erase(Tokens.end() - 1);
  return true;
}

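// Merges '@' with a following C# keyword into a single identifier token, so
// verbatim identifiers like '@class' are treated as one name.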
bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
  if (Tokens.size() < 2)
    return false;
  auto &At = *(Tokens.end() - 2);
  auto &Keyword = *(Tokens.end() - 1);
  if (!At->is(tok::at))
    return false;
  if (!Keywords.isCSharpKeyword(*Keyword))
    return false;

  At->Tok.setKind(tok::identifier);
  At->TokenText = StringRef(At->TokenText.begin(),
                            Keyword->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += Keyword->ColumnWidth;
  At->setType(Keyword->getType());
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// In C#, transform the identifier 'foreach' into a for-each keyword token.
bool FormatTokenLexer::tryTransformCSharpForEach() {
  if (Tokens.size() < 1)
    return false;
  auto &Identifier = *(Tokens.end() - 1);
  if (!Identifier->is(tok::identifier))
    return false;
  if (Identifier->TokenText != "foreach")
    return false;

  Identifier->setType(TT_ForEachMacro);
  Identifier->Tok.setKind(tok::kw_for);
  return true;
}

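// Merges 'for' followed by the identifier 'each' into a single token typed as
// a for-each macro, so 'for each (...)' loops are formatted like other
// for-each constructs.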
bool FormatTokenLexer::tryMergeForEach() {
  if (Tokens.size() < 2)
    return false;
  auto &For = *(Tokens.end() - 2);
  auto &Each = *(Tokens.end() - 1);
  if (!For->is(tok::kw_for))
    return false;
  if (!Each->is(tok::identifier))
    return false;
  if (Each->TokenText != "each")
    return false;

  For->setType(TT_ForEachMacro);
  For->Tok.setKind(tok::kw_for);

  For->TokenText = StringRef(For->TokenText.begin(),
                             Each->TokenText.end() - For->TokenText.begin());
  For->ColumnWidth += Each->ColumnWidth;
  Tokens.erase(Tokens.end() - 1);
  return true;
}

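// Demotes 'try' to a plain identifier when it does not start a try block
// (i.e. it is not followed by '{', ':', '#', or a comment and not preceded by
// '@'), so that C code using 'try' as a name is not misparsed.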
bool FormatTokenLexer::tryTransformTryUsageForC() {
  if (Tokens.size() < 2)
    return false;
  auto &Try = *(Tokens.end() - 2);
  if (!Try->is(tok::kw_try))
    return false;
  auto &Next = *(Tokens.end() - 1);
  if (Next->isOneOf(tok::l_brace, tok::colon, tok::hash, tok::comment))
    return false;

  if (Tokens.size() > 2) {
    auto &At = *(Tokens.end() - 3);
    if (At->is(tok::at))
      return false;
  }

  Try->Tok.setKind(tok::identifier);
  return true;
}

bool FormatTokenLexer::tryMergeLessLess() {
  // Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
  if (Tokens.size() < 3)
    return false;

  bool FourthTokenIsLess = false;
  if (Tokens.size() > 3)
    FourthTokenIsLess = (Tokens.end() - 4)[0]->is(tok::less);

  auto First = Tokens.end() - 3;
  if (First[2]->is(tok::less) || First[1]->isNot(tok::less) ||
      First[0]->isNot(tok::less) || FourthTokenIsLess)
    return false;

  // Only merge if there currently is no whitespace between the two "<".
  if (First[1]->WhitespaceRange.getBegin() !=
      First[1]->WhitespaceRange.getEnd())
    return false;

  First[0]->Tok.setKind(tok::lessless);
  First[0]->TokenText = "<<";
  First[0]->ColumnWidth += 1;
  Tokens.erase(Tokens.end() - 2);
  return true;
}

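// Merges a run of tokens matching Kinds, provided no whitespace separates
// them, into the first token and assigns it NewType.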
bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
                                      TokenType NewType) {
  if (Tokens.size() < Kinds.size())
    return false;

  SmallVectorImpl<FormatToken *>::const_iterator First =
      Tokens.end() - Kinds.size();
  if (!First[0]->is(Kinds[0]))
    return false;
  unsigned AddLength = 0;
  for (unsigned i = 1; i < Kinds.size(); ++i) {
    if (!First[i]->is(Kinds[i]) || First[i]->WhitespaceRange.getBegin() !=
                                       First[i]->WhitespaceRange.getEnd())
      return false;
    AddLength += First[i]->TokenText.size();
  }
  Tokens.resize(Tokens.size() - Kinds.size() + 1);
  First[0]->TokenText = StringRef(First[0]->TokenText.data(),
                                  First[0]->TokenText.size() + AddLength);
  First[0]->ColumnWidth += AddLength;
  First[0]->setType(NewType);
  return true;
}

// Returns \c true if \p Tok can only be followed by an operand in JavaScript.
bool FormatTokenLexer::precedesOperand(FormatToken *Tok) {
  // NB: This is not entirely correct, as an r_paren can introduce an operand
  // location in e.g. `if (foo) /bar/.exec(...);`. That is a rare enough
  // corner case to not matter in practice, though.
  return Tok->isOneOf(tok::period, tok::l_paren, tok::comma, tok::l_brace,
                      tok::r_brace, tok::l_square, tok::semi, tok::exclaim,
                      tok::colon, tok::question, tok::tilde) ||
         Tok->isOneOf(tok::kw_return, tok::kw_do, tok::kw_case, tok::kw_throw,
                      tok::kw_else, tok::kw_new, tok::kw_delete, tok::kw_void,
                      tok::kw_typeof, Keywords.kw_instanceof, Keywords.kw_in) ||
         Tok->isBinaryOperator();
}

bool FormatTokenLexer::canPrecedeRegexLiteral(FormatToken *Prev) {
  if (!Prev)
    return true;

  // Regex literals can only follow after prefix unary operators, not after
  // postfix unary operators. If the '++' is followed by a non-operand
  // introducing token, the slash here is the operand and not the start of a
  // regex.
  // `!` is a unary prefix operator, but also a postfix operator that casts
  // away nullability, so the same check applies.
  if (Prev->isOneOf(tok::plusplus, tok::minusminus, tok::exclaim))
    return (Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]));

  // The previous token must introduce an operand location where regex
  // literals can occur.
  if (!precedesOperand(Prev))
    return false;

  return true;
}

// Tries to parse a JavaScript Regex literal starting at the current token,
// if that begins with a slash and is in a location where JavaScript allows
// regex literals. Changes the current token to a regex literal and updates
// its text if successful.
void FormatTokenLexer::tryParseJSRegexLiteral() {
  FormatToken *RegexToken = Tokens.back();
  if (!RegexToken->isOneOf(tok::slash, tok::slashequal))
    return;

  FormatToken *Prev = nullptr;
  for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; ++I) {
    // NB: Because previous pointers are not initialized yet, this cannot use
    // Token.getPreviousNonComment.
    if ((*I)->isNot(tok::comment)) {
      Prev = *I;
      break;
    }
  }

  if (!canPrecedeRegexLiteral(Prev))
    return;

  // 'Manually' lex ahead in the current file buffer.
  const char *Offset = Lex->getBufferLocation();
  const char *RegexBegin = Offset - RegexToken->TokenText.size();
  StringRef Buffer = Lex->getBuffer();
  bool InCharacterClass = false;
  bool HaveClosingSlash = false;
  for (; !HaveClosingSlash && Offset != Buffer.end(); ++Offset) {
    // Regular expressions are terminated with a '/', which can only be
    // escaped using '\' or a character class between '[' and ']'.
    // See http://www.ecma-international.org/ecma-262/5.1/#sec-7.8.5.
    switch (*Offset) {
    case '\\':
      // Skip the escaped character.
      ++Offset;
      break;
    case '[':
      InCharacterClass = true;
      break;
    case ']':
      InCharacterClass = false;
      break;
    case '/':
      if (!InCharacterClass)
        HaveClosingSlash = true;
      break;
    }
  }

  RegexToken->setType(TT_RegexLiteral);
  // Treat regex literals like other string_literals.
  RegexToken->Tok.setKind(tok::string_literal);
  RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
  RegexToken->ColumnWidth = RegexToken->TokenText.size();

  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}

void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
  FormatToken *CSharpStringLiteral = Tokens.back();

  if (CSharpStringLiteral->getType() != TT_CSharpStringLiteral)
    return;

  // Deal with multiline strings.
  if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
        CSharpStringLiteral->TokenText.startswith(R"($@")")))
    return;

  const char *StrBegin =
      Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
  const char *Offset = StrBegin;
  if (CSharpStringLiteral->TokenText.startswith(R"(@")"))
    Offset += 2;
  else // CSharpStringLiteral->TokenText.startswith(R"($@")")
    Offset += 3;

  // Look for a terminating '"' in the current file buffer.
  // Make no effort to format code within an interpolated or verbatim string.
  for (; Offset != Lex->getBuffer().end(); ++Offset) {
    if (Offset[0] == '"') {
      // "" within a verbatim string is an escaped double quote: skip it.
      if (Offset + 1 < Lex->getBuffer().end() && Offset[1] == '"')
        ++Offset;
      else
        break;
    }
  }

  // Make no attempt to format code properly if a verbatim string is
  // unterminated.
  if (Offset == Lex->getBuffer().end())
    return;

  StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
  CSharpStringLiteral->TokenText = LiteralText;

  // Adjust width for potentially multiline string literals.
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                ? LiteralText
                                : LiteralText.substr(0, FirstBreak);
  CSharpStringLiteral->ColumnWidth = encoding::columnWidthWithTabs(
      FirstLineText, CSharpStringLiteral->OriginalColumn, Style.TabWidth,
      Encoding);
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    CSharpStringLiteral->IsMultiline = true;
    unsigned StartColumn = 0;
    CSharpStringLiteral->LastLineColumnWidth = encoding::columnWidthWithTabs(
        LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
        Style.TabWidth, Encoding);
  }

  SourceLocation loc = Offset < Lex->getBuffer().end()
                           ? Lex->getSourceLocation(Offset + 1)
                           : SourceMgr.getLocForEndOfFile(ID);
  resetLexer(SourceMgr.getFileOffset(loc));
}

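// Handles JavaScript template strings: lexes ahead from a backtick to the
// closing backtick or to a '${' interpolation, tracking nesting of
// interpolated expressions via StateStack, and turns the consumed text into a
// single TT_TemplateString token.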
void FormatTokenLexer::handleTemplateStrings() {
  FormatToken *BacktickToken = Tokens.back();

  if (BacktickToken->is(tok::l_brace)) {
    StateStack.push(LexerState::NORMAL);
    return;
  }
  if (BacktickToken->is(tok::r_brace)) {
    if (StateStack.size() == 1)
      return;
    StateStack.pop();
    if (StateStack.top() != LexerState::TEMPLATE_STRING)
      return;
    // If back in TEMPLATE_STRING, fall through and continue parsing the
    // template string.
  } else if (BacktickToken->is(tok::unknown) &&
             BacktickToken->TokenText == "`") {
    StateStack.push(LexerState::TEMPLATE_STRING);
  } else {
    return; // Not actually a template string.
  }

  // 'Manually' lex ahead in the current file buffer.
  const char *Offset = Lex->getBufferLocation();
  const char *TmplBegin = Offset - BacktickToken->TokenText.size(); // at "`"
  for (; Offset != Lex->getBuffer().end(); ++Offset) {
    if (Offset[0] == '`') {
      StateStack.pop();
      break;
    }
    if (Offset[0] == '\\') {
      ++Offset; // Skip the escaped character.
    } else if (Offset + 1 < Lex->getBuffer().end() && Offset[0] == '$' &&
               Offset[1] == '{') {
      // '${' introduces an expression interpolation in the template string.
      StateStack.push(LexerState::NORMAL);
      ++Offset;
      break;
    }
  }

  StringRef LiteralText(TmplBegin, Offset - TmplBegin + 1);
  BacktickToken->setType(TT_TemplateString);
  BacktickToken->Tok.setKind(tok::string_literal);
  BacktickToken->TokenText = LiteralText;

  // Adjust width for potentially multiline string literals.
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                ? LiteralText
                                : LiteralText.substr(0, FirstBreak);
  BacktickToken->ColumnWidth = encoding::columnWidthWithTabs(
      FirstLineText, BacktickToken->OriginalColumn, Style.TabWidth, Encoding);
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    BacktickToken->IsMultiline = true;
    unsigned StartColumn = 0; // The template tail spans the entire line.
    BacktickToken->LastLineColumnWidth = encoding::columnWidthWithTabs(
        LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
        Style.TabWidth, Encoding);
  }

  SourceLocation loc = Offset < Lex->getBuffer().end()
                           ? Lex->getSourceLocation(Offset + 1)
                           : SourceMgr.getLocForEndOfFile(ID);
  resetLexer(SourceMgr.getFileOffset(loc));
}

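// In text protos, '#' starts a line comment; turn the '#' token and the rest
// of the line into a single TT_LineComment token.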
void FormatTokenLexer::tryParsePythonComment() {
  FormatToken *HashToken = Tokens.back();
  if (!HashToken->isOneOf(tok::hash, tok::hashhash))
    return;
  // Turn the remainder of this line into a comment.
  const char *CommentBegin =
      Lex->getBufferLocation() - HashToken->TokenText.size(); // at "#"
  size_t From = CommentBegin - Lex->getBuffer().begin();
  size_t To = Lex->getBuffer().find_first_of('\n', From);
  if (To == StringRef::npos)
    To = Lex->getBuffer().size();
  size_t Len = To - From;
  HashToken->setType(TT_LineComment);
  HashToken->Tok.setKind(tok::comment);
  HashToken->TokenText = Lex->getBuffer().substr(From, Len);
  SourceLocation Loc = To < Lex->getBuffer().size()
                           ? Lex->getSourceLocation(CommentBegin + Len)
                           : SourceMgr.getLocForEndOfFile(ID);
  resetLexer(SourceMgr.getFileOffset(Loc));
}

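// Merges the four tokens of an _T("...") macro invocation into a single
// string literal token covering the whole invocation.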
bool FormatTokenLexer::tryMerge_TMacro() {
  if (Tokens.size() < 4)
    return false;
  FormatToken *Last = Tokens.back();
  if (!Last->is(tok::r_paren))
    return false;

  FormatToken *String = Tokens[Tokens.size() - 2];
  if (!String->is(tok::string_literal) || String->IsMultiline)
    return false;

  if (!Tokens[Tokens.size() - 3]->is(tok::l_paren))
    return false;

  FormatToken *Macro = Tokens[Tokens.size() - 4];
  if (Macro->TokenText != "_T")
    return false;

  const char *Start = Macro->TokenText.data();
  const char *End = Last->TokenText.data() + Last->TokenText.size();
  String->TokenText = StringRef(Start, End - Start);
  String->IsFirst = Macro->IsFirst;
  String->LastNewlineOffset = Macro->LastNewlineOffset;
  String->WhitespaceRange = Macro->WhitespaceRange;
  String->OriginalColumn = Macro->OriginalColumn;
  String->ColumnWidth = encoding::columnWidthWithTabs(
      String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding);
  String->NewlinesBefore = Macro->NewlinesBefore;
  String->HasUnescapedNewline = Macro->HasUnescapedNewline;

  Tokens.pop_back();
  Tokens.pop_back();
  Tokens.pop_back();
  Tokens.back() = String;
  return true;
}

bool FormatTokenLexer::tryMergeConflictMarkers() {
  if (Tokens.back()->NewlinesBefore == 0 && Tokens.back()->isNot(tok::eof))
    return false;

  // Conflict lines look like:
  // <marker> <text from the vcs>
  // For example:
  // >>>>>>> /file/in/file/system at revision 1234
  //
  // We merge all tokens in a line that starts with a conflict marker
  // into a single token with a special token type that the unwrapped line
  // parser will use to correctly rebuild the underlying code.

  FileID ID;
  // Get the position of the first token in the line.
  unsigned FirstInLineOffset;
  std::tie(ID, FirstInLineOffset) = SourceMgr.getDecomposedLoc(
      Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
  StringRef Buffer = SourceMgr.getBufferOrFake(ID).getBuffer();
  // Calculate the offset of the start of the current line.
  auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
  if (LineOffset == StringRef::npos) {
    LineOffset = 0;
  } else {
    ++LineOffset;
  }

  auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
  StringRef LineStart;
  if (FirstSpace == StringRef::npos) {
    LineStart = Buffer.substr(LineOffset);
  } else {
    LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
  }

  TokenType Type = TT_Unknown;
  if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
    Type = TT_ConflictStart;
  } else if (LineStart == "|||||||" || LineStart == "=======" ||
             LineStart == "====") {
    Type = TT_ConflictAlternative;
  } else if (LineStart == ">>>>>>>" || LineStart == "<<<<") {
    Type = TT_ConflictEnd;
  }

  if (Type != TT_Unknown) {
    FormatToken *Next = Tokens.back();

    Tokens.resize(FirstInLineIndex + 1);
    // We do not need to build a complete token here, as we will skip it
    // during parsing anyway (as we must not touch whitespace around conflict
    // markers).
    Tokens.back()->setType(Type);
    Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);

    Tokens.push_back(Next);
    return true;
  }

  return false;
}

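// Returns the stashed second '>' or '<' token created when getNextToken split
// a '>>' or '<<' token into two.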
FormatToken *FormatTokenLexer::getStashedToken() {
  // Create a synthesized second '>' or '<' token.
  Token Tok = FormatTok->Tok;
  StringRef TokenText = FormatTok->TokenText;

  unsigned OriginalColumn = FormatTok->OriginalColumn;
  FormatTok = new (Allocator.Allocate()) FormatToken;
  FormatTok->Tok = Tok;
  SourceLocation TokLocation =
      FormatTok->Tok.getLocation().getLocWithOffset(Tok.getLength() - 1);
  FormatTok->Tok.setLocation(TokLocation);
  FormatTok->WhitespaceRange = SourceRange(TokLocation, TokLocation);
  FormatTok->TokenText = TokenText;
  FormatTok->ColumnWidth = 1;
  FormatTok->OriginalColumn = OriginalColumn + 1;

  return FormatTok;
}

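// Lexes the next token: consumes and records leading whitespace and escaped
// newlines, computes column widths, splits '>>' and '<<' into two stashed
// tokens, and classifies identifiers that match configured macros.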
FormatToken *FormatTokenLexer::getNextToken() {
  if (StateStack.top() == LexerState::TOKEN_STASHED) {
    StateStack.pop();
    return getStashedToken();
  }

  FormatTok = new (Allocator.Allocate()) FormatToken;
  readRawToken(*FormatTok);
  SourceLocation WhitespaceStart =
      FormatTok->Tok.getLocation().getLocWithOffset(-TrailingWhitespace);
  FormatTok->IsFirst = IsFirstToken;
  IsFirstToken = false;

  // Consume and record whitespace until we find a significant token.
  unsigned WhitespaceLength = TrailingWhitespace;
  while (FormatTok->Tok.is(tok::unknown)) {
    StringRef Text = FormatTok->TokenText;
    auto EscapesNewline = [&](int pos) {
      // A '\r' here is just part of '\r\n'. Skip it.
      if (pos >= 0 && Text[pos] == '\r')
        --pos;
      // See whether there is an odd number of '\' before this.
      // FIXME: This is wrong. A '\' followed by a newline is always removed,
      // regardless of whether there is another '\' before it.
      // FIXME: Newlines can also be escaped by a '?' '?' '/' trigraph.
      unsigned count = 0;
      for (; pos >= 0; --pos, ++count)
        if (Text[pos] != '\\')
          break;
      return count & 1;
    };
    // FIXME: This miscounts tok::unknown tokens that are not just
    // whitespace, e.g. a '`' character.
    for (int i = 0, e = Text.size(); i != e; ++i) {
      switch (Text[i]) {
      case '\n':
        ++FormatTok->NewlinesBefore;
        FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1);
        FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
        Column = 0;
        break;
      case '\r':
        FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
        Column = 0;
        break;
      case '\f':
      case '\v':
        Column = 0;
        break;
      case ' ':
        ++Column;
        break;
      case '\t':
        Column +=
            Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0);
        break;
      case '\\':
        if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
          FormatTok->setType(TT_ImplicitStringLiteral);
        break;
      default:
        FormatTok->setType(TT_ImplicitStringLiteral);
        break;
      }
      if (FormatTok->getType() == TT_ImplicitStringLiteral)
        break;
    }

    if (FormatTok->is(TT_ImplicitStringLiteral))
      break;
    WhitespaceLength += FormatTok->Tok.getLength();

    readRawToken(*FormatTok);
  }

  // JavaScript and Java do not allow escaping the end of a line with a
  // backslash. Backslashes are syntax errors in plain source, but can occur in
  // comments. When a single-line comment ends with a '\', it causes the next
  // line of code to be lexed as a comment, breaking formatting. The code below
  // finds comments that contain a backslash followed by a line break,
  // truncates the comment token at the backslash, and resets the lexer to
  // restart behind the backslash.
  if ((Style.Language == FormatStyle::LK_JavaScript ||
       Style.Language == FormatStyle::LK_Java) &&
      FormatTok->is(tok::comment) && FormatTok->TokenText.startswith("//")) {
    size_t BackslashPos = FormatTok->TokenText.find('\\');
    while (BackslashPos != StringRef::npos) {
      if (BackslashPos + 1 < FormatTok->TokenText.size() &&
          FormatTok->TokenText[BackslashPos + 1] == '\n') {
        const char *Offset = Lex->getBufferLocation();
        Offset -= FormatTok->TokenText.size();
        Offset += BackslashPos + 1;
        resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
        FormatTok->TokenText = FormatTok->TokenText.substr(0, BackslashPos + 1);
        FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
            FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
            Encoding);
        break;
      }
      BackslashPos = FormatTok->TokenText.find('\\', BackslashPos + 1);
    }
  }

  // In case the token starts with escaped newlines, we want to
  // take them into account as whitespace - this pattern is quite frequent
  // in macro definitions.
  // FIXME: Add a more explicit test.
  while (FormatTok->TokenText.size() > 1 && FormatTok->TokenText[0] == '\\') {
    unsigned SkippedWhitespace = 0;
    if (FormatTok->TokenText.size() > 2 &&
        (FormatTok->TokenText[1] == '\r' && FormatTok->TokenText[2] == '\n'))
      SkippedWhitespace = 3;
    else if (FormatTok->TokenText[1] == '\n')
      SkippedWhitespace = 2;
    else
      break;

    ++FormatTok->NewlinesBefore;
    WhitespaceLength += SkippedWhitespace;
    FormatTok->LastNewlineOffset = SkippedWhitespace;
    Column = 0;
    FormatTok->TokenText = FormatTok->TokenText.substr(SkippedWhitespace);
  }

  FormatTok->WhitespaceRange = SourceRange(
      WhitespaceStart, WhitespaceStart.getLocWithOffset(WhitespaceLength));

  FormatTok->OriginalColumn = Column;

  TrailingWhitespace = 0;
  if (FormatTok->Tok.is(tok::comment)) {
    // FIXME: Add the trimmed whitespace to Column.
    StringRef UntrimmedText = FormatTok->TokenText;
    FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
    TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
  } else if (FormatTok->Tok.is(tok::raw_identifier)) {
    IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
    FormatTok->Tok.setIdentifierInfo(&Info);
    FormatTok->Tok.setKind(Info.getTokenID());
    if (Style.Language == FormatStyle::LK_Java &&
        FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete,
                           tok::kw_operator)) {
      FormatTok->Tok.setKind(tok::identifier);
      FormatTok->Tok.setIdentifierInfo(nullptr);
    } else if (Style.Language == FormatStyle::LK_JavaScript &&
               FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
                                  tok::kw_operator)) {
      FormatTok->Tok.setKind(tok::identifier);
      FormatTok->Tok.setIdentifierInfo(nullptr);
    }
  } else if (FormatTok->Tok.is(tok::greatergreater)) {
    FormatTok->Tok.setKind(tok::greater);
    FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
    ++Column;
    StateStack.push(LexerState::TOKEN_STASHED);
  } else if (FormatTok->Tok.is(tok::lessless)) {
    FormatTok->Tok.setKind(tok::less);
    FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
    ++Column;
    StateStack.push(LexerState::TOKEN_STASHED);
  }

  // Now FormatTok is the next non-whitespace token.

  StringRef Text = FormatTok->TokenText;
  size_t FirstNewlinePos = Text.find('\n');
  if (FirstNewlinePos == StringRef::npos) {
    // FIXME: ColumnWidth actually depends on the start column, we need to
    // take this into account when the token is moved.
    FormatTok->ColumnWidth =
        encoding::columnWidthWithTabs(Text, Column, Style.TabWidth, Encoding);
    Column += FormatTok->ColumnWidth;
  } else {
    FormatTok->IsMultiline = true;
    // FIXME: ColumnWidth actually depends on the start column, we need to
    // take this into account when the token is moved.
    FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(0, FirstNewlinePos), Column, Style.TabWidth, Encoding);

    // The last line of the token always starts in column 0.
    // Thus, the length can be precomputed even in the presence of tabs.
    FormatTok->LastLineColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(Text.find_last_of('\n') + 1), 0, Style.TabWidth, Encoding);
    Column = FormatTok->LastLineColumnWidth;
  }

  if (Style.isCpp()) {
    auto it = Macros.find(FormatTok->Tok.getIdentifierInfo());
    if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
          Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
              tok::pp_define) &&
        it != Macros.end()) {
      FormatTok->setType(it->second);
    } else if (FormatTok->is(tok::identifier)) {
      if (MacroBlockBeginRegex.match(Text)) {
        FormatTok->setType(TT_MacroBlockBegin);
      } else if (MacroBlockEndRegex.match(Text)) {
        FormatTok->setType(TT_MacroBlockEnd);
      }
    }
  }

  return FormatTok;
}

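// Reads a single raw token from the underlying lexer and applies
// language-specific fixups, e.g. treating unterminated string literals as
// string literals and toggling FormattingDisabled on "clang-format on/off"
// comments.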
void FormatTokenLexer::readRawToken(FormatToken &Tok) {
  Lex->LexFromRawLexer(Tok.Tok);
  Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
                            Tok.Tok.getLength());
  // For formatting, treat unterminated string literals like normal string
  // literals.
  if (Tok.is(tok::unknown)) {
    if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
      Tok.Tok.setKind(tok::string_literal);
      Tok.IsUnterminatedLiteral = true;
    } else if (Style.Language == FormatStyle::LK_JavaScript &&
               Tok.TokenText == "''") {
      Tok.Tok.setKind(tok::string_literal);
    }
  }

  if ((Style.Language == FormatStyle::LK_JavaScript ||
       Style.Language == FormatStyle::LK_Proto ||
       Style.Language == FormatStyle::LK_TextProto) &&
      Tok.is(tok::char_constant)) {
    Tok.Tok.setKind(tok::string_literal);
  }

  if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
                               Tok.TokenText == "/* clang-format on */")) {
    FormattingDisabled = false;
  }

  Tok.Finalized = FormattingDisabled;

  if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
                               Tok.TokenText == "/* clang-format off */")) {
    FormattingDisabled = true;
  }
}

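// Re-creates the underlying raw lexer so that lexing continues at the given
// file offset; used after manual lookahead has consumed characters directly
// from the buffer.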
void FormatTokenLexer::resetLexer(unsigned Offset) {
  StringRef Buffer = SourceMgr.getBufferData(ID);
  Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID),
                      getFormattingLangOpts(Style), Buffer.begin(),
                      Buffer.begin() + Offset, Buffer.end()));
  Lex->SetKeepWhitespaceMode(true);
  TrailingWhitespace = 0;
}

} // namespace format
} // namespace clang