//===--- SourceCode.cpp - Source code manipulation routines -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides functions that simplify extraction of source code.
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Transformer/SourceCode.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include <set>

using namespace clang;

using llvm::errc;
using llvm::StringError;

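// Illustrative example (added for exposition, not upstream documentation):
// for an expression node `E` spelling `x + 1`, calling
//   getText(CharSourceRange::getTokenRange(E->getSourceRange()), Context)
// returns the source text "x + 1".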
StringRef clang::tooling::getText(CharSourceRange Range,
                                  const ASTContext &Context) {
  return Lexer::getSourceText(Range, Context.getSourceManager(),
                              Context.getLangOpts());
}

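// Illustrative example (added for exposition): if `Range` covers the tokens
// of `int x = 3` within `int x = 3;`, then
//   maybeExtendRange(Range, tok::semi, Context)
// returns a token range that also covers the trailing `;`. If the token after
// the range is not `;` (or cannot be lexed), the range is returned unchanged.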
CharSourceRange clang::tooling::maybeExtendRange(CharSourceRange Range,
                                                 tok::TokenKind Next,
                                                 ASTContext &Context) {
  CharSourceRange R = Lexer::getAsCharRange(Range, Context.getSourceManager(),
                                            Context.getLangOpts());
  if (R.isInvalid())
    return Range;
  Token Tok;
  bool Err =
      Lexer::getRawToken(R.getEnd(), Tok, Context.getSourceManager(),
                         Context.getLangOpts(), /*IgnoreWhiteSpace=*/true);
  if (Err || !Tok.is(Next))
    return Range;
  return CharSourceRange::getTokenRange(Range.getBegin(), Tok.getLocation());
}

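// Illustrative failure case (added for exposition): a range computed from a
// macro argument, e.g. the `x > 0` inside `assert(x > 0)`, has macro
// locations at both ends, so it is rejected with "Range starts or ends in a
// macro expansion".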
llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
                                              const SourceManager &SM) {
  if (Range.isInvalid())
    return llvm::make_error<StringError>(errc::invalid_argument,
                                         "Invalid range");

  if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range starts or ends in a macro expansion");

  if (SM.isInSystemHeader(Range.getBegin()) ||
      SM.isInSystemHeader(Range.getEnd()))
    return llvm::make_error<StringError>(errc::invalid_argument,
                                         "Range is in system header");

  std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
  std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
  if (BeginInfo.first != EndInfo.first)
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range begins and ends in different files");

  if (BeginInfo.second > EndInfo.second)
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range's begin is past its end");

  return llvm::Error::success();
}

llvm::Optional<CharSourceRange>
clang::tooling::getRangeForEdit(const CharSourceRange &EditRange,
                                const SourceManager &SM,
                                const LangOptions &LangOpts) {
  // FIXME: makeFileCharRange() has the disadvantage of stripping off "identity"
  // macros. For example, if we're looking to rewrite the int literal 3 to 6,
  // and we have the following definition:
  //    #define DO_NOTHING(x) x
  // then
  //    foo(DO_NOTHING(3))
  // will be rewritten to
  //    foo(6)
  // rather than the arguably better
  //    foo(DO_NOTHING(6))
  // Decide whether the current behavior is desirable and modify if not.
  CharSourceRange Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
  bool IsInvalid = llvm::errorToBool(validateEditRange(Range, SM));
  if (IsInvalid)
    return llvm::None;
  return Range;
}

static bool startsWithNewline(const SourceManager &SM, const Token &Tok) {
  return isVerticalWhitespace(SM.getCharacterData(Tok.getLocation())[0]);
}

static bool contains(const std::set<tok::TokenKind> &Terminators,
                     const Token &Tok) {
  return Terminators.count(Tok.getKind()) > 0;
}

// Returns the exclusive, *file* end location of the entity whose last token is
// at location 'EntityLast'. That is, it returns the location one past the last
// relevant character.
//
// Associated tokens include comments, horizontal whitespace and 'Terminators'
// -- optional tokens, which, if any are found, will be included; if
// 'Terminators' is empty, we will not include any extra tokens beyond comments
// and horizontal whitespace.
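//
// Illustrative example (added for exposition), with Terminators = {tok::semi}:
//   int x = 3;  // trailing comment
//   int y = 4;
// With 'EntityLast' at the token `3`, the returned location is just past the
// newline ending the first line, so the `;`, the trailing comment, and the
// newline are all covered.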
static SourceLocation
getEntityEndLoc(const SourceManager &SM, SourceLocation EntityLast,
                const std::set<tok::TokenKind> &Terminators,
                const LangOptions &LangOpts) {
  assert(EntityLast.isValid() && "Invalid end location found.");

  // We remember the last location of a non-horizontal-whitespace token we have
  // lexed; this is the location up to which we will want to delete.
  // FIXME: Support using the spelling loc here for cases where we want to
  // analyze the macro text.

  CharSourceRange ExpansionRange = SM.getExpansionRange(EntityLast);
  // FIXME: Should check isTokenRange(), for the (rare) case that
  // `ExpansionRange` is a character range.
  std::unique_ptr<Lexer> Lexer = [&]() {
    bool Invalid = false;
    auto FileOffset = SM.getDecomposedLoc(ExpansionRange.getEnd());
    llvm::StringRef File = SM.getBufferData(FileOffset.first, &Invalid);
    assert(!Invalid && "Cannot get file/offset");
    return std::make_unique<clang::Lexer>(
        SM.getLocForStartOfFile(FileOffset.first), LangOpts, File.begin(),
        File.data() + FileOffset.second, File.end());
  }();

  // Tell Lexer to return whitespace as pseudo-tokens (kind is tok::unknown).
  Lexer->SetKeepWhitespaceMode(true);

  // Generally, the code we want to include looks like this ([] are optional).
  // If Terminators is empty:
  //   [ <comment> ] [ <newline> ]
  // Otherwise:
  //   ... <terminator> [ <comment> ] [ <newline> ]

  Token Tok;
  bool Terminated = false;

  // First, lex to the current token (which is the last token of the range that
  // is definitely associated with the decl). Then, we process the first token
  // separately from the rest based on conditions that hold specifically for
  // that first token.
  //
  // We do not search for a terminator if none is required or we've already
  // encountered it. Otherwise, if the original `EntityLast` location was in a
  // macro expansion, we don't have visibility into the text, so we assume we've
  // already terminated. However, we note this assumption with
  // `TerminatedByMacro`, because we'll want to handle it somewhat differently
  // for the terminators semicolon and comma. These terminators can be safely
  // associated with the entity when they appear after the macro -- extra
  // semicolons have no effect on the program and a well-formed program won't
  // have multiple commas in a row, so we're guaranteed that there is only one.
  //
  // FIXME: This handling of macros is more conservative than necessary. When
  // the end of the expansion coincides with the end of the node, we can still
  // safely analyze the code. But, it is more complicated, because we need to
  // start by lexing the spelling loc for the first token and then switch to the
  // expansion loc.
  bool TerminatedByMacro = false;
  Lexer->LexFromRawLexer(Tok);
  if (Terminators.empty() || contains(Terminators, Tok))
    Terminated = true;
  else if (EntityLast.isMacroID()) {
    Terminated = true;
    TerminatedByMacro = true;
  }

  // We save the most recent candidate for the exclusive end location.
  SourceLocation End = Tok.getEndLoc();

  while (!Terminated) {
    // Lex the next token we want to possibly expand the range with.
    Lexer->LexFromRawLexer(Tok);

    switch (Tok.getKind()) {
    case tok::eof:
    // Unexpected separators.
    case tok::l_brace:
    case tok::r_brace:
    case tok::comma:
      return End;
    // Whitespace pseudo-tokens.
    case tok::unknown:
      if (startsWithNewline(SM, Tok))
        // Include at least until the end of the line.
        End = Tok.getEndLoc();
      break;
    default:
      if (contains(Terminators, Tok))
        Terminated = true;
      End = Tok.getEndLoc();
      break;
    }
  }

  do {
    // Lex the next token we want to possibly expand the range with.
    Lexer->LexFromRawLexer(Tok);

    switch (Tok.getKind()) {
    case tok::unknown:
      if (startsWithNewline(SM, Tok))
        // We're done, but include this newline.
        return Tok.getEndLoc();
      break;
    case tok::comment:
      // Include any comments we find on the way.
      End = Tok.getEndLoc();
      break;
    case tok::semi:
    case tok::comma:
      if (TerminatedByMacro && contains(Terminators, Tok)) {
        End = Tok.getEndLoc();
        // We've found a real terminator.
        TerminatedByMacro = false;
        break;
      }
      // Found an unrelated token; stop and don't include it.
      return End;
    default:
      // Found an unrelated token; stop and don't include it.
      return End;
    }
  } while (true);
}

// Returns the expected terminator tokens for the given declaration.
//
// If we do not know the correct terminator token, returns an empty set.
//
// There are cases where we have more than one possible terminator (for example,
// we find either a comma or a semicolon after a VarDecl).
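//
// For instance (illustrative): in `int a = 1, b = 2;`, the VarDecl `a` is
// followed by a comma and the VarDecl `b` by a semicolon, so both tokens must
// be accepted as terminators for a VarDecl.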
static std::set<tok::TokenKind> getTerminators(const Decl &D) {
  if (llvm::isa<RecordDecl>(D) || llvm::isa<UsingDecl>(D))
    return {tok::semi};

  if (llvm::isa<FunctionDecl>(D) || llvm::isa<LinkageSpecDecl>(D))
    return {tok::r_brace, tok::semi};

  if (llvm::isa<VarDecl>(D) || llvm::isa<FieldDecl>(D))
    return {tok::comma, tok::semi};

  return {};
}

// Starting from `Loc`, skips whitespace up to, and including, a single
// newline. Returns the (exclusive) end of any skipped whitespace (that is, the
// location immediately after the whitespace).
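//
// Illustrative example (added for exposition): if `Loc` points at the first
// space in "  \n  int x;", the returned location is just past the '\n'; the
// indentation of the following line is not skipped.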
static SourceLocation skipWhitespaceAndNewline(const SourceManager &SM,
                                               SourceLocation Loc,
                                               const LangOptions &LangOpts) {
  const char *LocChars = SM.getCharacterData(Loc);
  int i = 0;
  while (isHorizontalWhitespace(LocChars[i]))
    ++i;
  if (isVerticalWhitespace(LocChars[i]))
    ++i;
  return Loc.getLocWithOffset(i);
}

// Is `Loc` separated from any following decl by something meaningful (e.g. an
// empty line, a comment), ignoring horizontal whitespace? Since this is a
// heuristic, we return false when in doubt. `Loc` cannot be the first location
// in the file.
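//
// Illustrative example (added for exposition): given
//   int a;
//
//   int b;
// the location just past the newline ending `int a;` is followed by an empty
// line, so the function returns true; with the empty line removed, the next
// raw token is the `int` of the following declaration and it returns false.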
static bool atOrBeforeSeparation(const SourceManager &SM, SourceLocation Loc,
                                 const LangOptions &LangOpts) {
  // If the preceding character is a newline, we'll check for an empty line as a
  // separator. However, we can't identify an empty line using tokens, so we
  // analyse the characters. If we try to use tokens, we'll just end up with a
  // whitespace token, whose characters we'd have to analyse anyhow.
  bool Invalid = false;
  const char *LocChars =
      SM.getCharacterData(Loc.getLocWithOffset(-1), &Invalid);
  assert(!Invalid &&
         "Loc must be a valid character and not the first of the source file.");
  if (isVerticalWhitespace(LocChars[0])) {
    for (int i = 1; isWhitespace(LocChars[i]); ++i)
      if (isVerticalWhitespace(LocChars[i]))
        return true;
  }
  // We didn't find an empty line, so lex the next token, skipping past any
  // whitespace we just scanned.
  Token Tok;
  bool Failed = Lexer::getRawToken(Loc, Tok, SM, LangOpts,
                                   /*IgnoreWhiteSpace=*/true);
  if (Failed)
    // Any text that confuses the lexer seems fair to consider a separation.
    return true;

  switch (Tok.getKind()) {
  case tok::comment:
  case tok::l_brace:
  case tok::r_brace:
  case tok::eof:
    return true;
  default:
    return false;
  }
}

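// Illustrative sketch of the returned range (added for exposition, assuming
// the declaration stands alone in its file):
//
//   /// Frobnicates widgets.          <- leading doc comment is included
//   [[nodiscard]] int frobnicate();   <- attribute, declaration, and the
//                                        trailing ';' are included, up to and
//                                        including the newline ending the line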
CharSourceRange tooling::getAssociatedRange(const Decl &Decl,
                                            ASTContext &Context) {
  const SourceManager &SM = Context.getSourceManager();
  const LangOptions &LangOpts = Context.getLangOpts();
  CharSourceRange Range = CharSourceRange::getTokenRange(Decl.getSourceRange());

  // First, expand to the start of the template<> declaration if necessary.
  if (const auto *Record = llvm::dyn_cast<CXXRecordDecl>(&Decl)) {
    if (const auto *T = Record->getDescribedClassTemplate())
      if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
        Range.setBegin(T->getBeginLoc());
  } else if (const auto *F = llvm::dyn_cast<FunctionDecl>(&Decl)) {
    if (const auto *T = F->getDescribedFunctionTemplate())
      if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
        Range.setBegin(T->getBeginLoc());
  }

  // Next, expand the end location past trailing comments to include a potential
  // newline at the end of the decl's line.
  Range.setEnd(
      getEntityEndLoc(SM, Decl.getEndLoc(), getTerminators(Decl), LangOpts));
  Range.setTokenRange(false);

  // Expand to include preceding associated comments. We ignore any comments
  // that are not preceding the decl, since we've already skipped trailing
  // comments with getEntityEndLoc.
  if (const RawComment *Comment =
          Decl.getASTContext().getRawCommentForDeclNoCache(&Decl))
    // Only include a preceding comment if:
    // * it is *not* separate from the declaration (not including any newline
    //   that immediately follows the comment),
    // * the decl *is* separate from any following entity (so, there are no
    //   other entities the comment could refer to), and
    // * it is not an IfThisThenThat lint check.
    if (SM.isBeforeInTranslationUnit(Comment->getBeginLoc(),
                                     Range.getBegin()) &&
        !atOrBeforeSeparation(
            SM, skipWhitespaceAndNewline(SM, Comment->getEndLoc(), LangOpts),
            LangOpts) &&
        atOrBeforeSeparation(SM, Range.getEnd(), LangOpts)) {
      const StringRef CommentText = Comment->getRawText(SM);
      if (!CommentText.contains("LINT.IfChange") &&
          !CommentText.contains("LINT.ThenChange"))
        Range.setBegin(Comment->getBeginLoc());
    }
  // Add leading attributes.
  for (auto *Attr : Decl.attrs()) {
    if (Attr->getLocation().isInvalid() ||
        !SM.isBeforeInTranslationUnit(Attr->getLocation(), Range.getBegin()))
      continue;
    Range.setBegin(Attr->getLocation());

    // Extend to the left '[[' or '__attribute__((' if we saw the attribute,
    // unless it is not a valid location.
    bool Invalid;
    StringRef Source =
        SM.getBufferData(SM.getFileID(Range.getBegin()), &Invalid);
    if (Invalid)
      continue;
    llvm::StringRef BeforeAttr =
        Source.substr(0, SM.getFileOffset(Range.getBegin()));
    llvm::StringRef BeforeAttrStripped = BeforeAttr.rtrim();

    for (llvm::StringRef Prefix : {"[[", "__attribute__(("}) {
      // Handle whitespace between attribute prefix and attribute value.
      if (BeforeAttrStripped.endswith(Prefix)) {
        // Move start to start position of prefix, which is
        // length(BeforeAttr) - length(BeforeAttrStripped) + length(Prefix)
        // positions to the left.
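        // For instance (illustrative): with the source text `[[  nodiscard]]`
        // and Range.getBegin() at `nodiscard`, the stripped whitespace
        // accounts for 2 characters and the "[[" prefix for 2 more, so the
        // begin location moves 2 + 2 = 4 positions left, onto the first '['.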
        Range.setBegin(Range.getBegin().getLocWithOffset(static_cast<int>(
            -BeforeAttr.size() + BeforeAttrStripped.size() - Prefix.size())));
        break;
        // If we didn't see '[[' or '__attribute' it's probably coming from a
        // macro expansion which is already handled by makeFileCharRange(),
        // below.
      }
    }
  }

  // Range.getEnd() is already fully un-expanded by getEntityEndLoc. But,
  // Range.getBegin() may be inside an expansion.
  return Lexer::makeFileCharRange(Range, SM, LangOpts);
}