• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines analysis_warnings::[Policy,Executor].
// Together they are used by Sema to issue warnings based on inexpensive
// static analysis algorithms in libAnalysis.
//
//===----------------------------------------------------------------------===//
15 
16 #include "clang/Sema/AnalysisBasedWarnings.h"
17 #include "clang/AST/DeclCXX.h"
18 #include "clang/AST/DeclObjC.h"
19 #include "clang/AST/EvaluatedExprVisitor.h"
20 #include "clang/AST/ExprCXX.h"
21 #include "clang/AST/ExprObjC.h"
22 #include "clang/AST/ParentMap.h"
23 #include "clang/AST/RecursiveASTVisitor.h"
24 #include "clang/AST/StmtCXX.h"
25 #include "clang/AST/StmtObjC.h"
26 #include "clang/AST/StmtVisitor.h"
27 #include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
28 #include "clang/Analysis/Analyses/ReachableCode.h"
29 #include "clang/Analysis/Analyses/ThreadSafety.h"
30 #include "clang/Analysis/Analyses/UninitializedValues.h"
31 #include "clang/Analysis/AnalysisContext.h"
32 #include "clang/Analysis/CFG.h"
33 #include "clang/Analysis/CFGStmtMap.h"
34 #include "clang/Basic/SourceLocation.h"
35 #include "clang/Basic/SourceManager.h"
36 #include "clang/Lex/Lexer.h"
37 #include "clang/Lex/Preprocessor.h"
38 #include "clang/Sema/ScopeInfo.h"
39 #include "clang/Sema/SemaInternal.h"
40 #include "llvm/ADT/ArrayRef.h"
41 #include "llvm/ADT/BitVector.h"
42 #include "llvm/ADT/FoldingSet.h"
43 #include "llvm/ADT/ImmutableMap.h"
44 #include "llvm/ADT/MapVector.h"
45 #include "llvm/ADT/PostOrderIterator.h"
46 #include "llvm/ADT/SmallString.h"
47 #include "llvm/ADT/SmallVector.h"
48 #include "llvm/ADT/StringRef.h"
49 #include "llvm/Support/Casting.h"
50 #include <algorithm>
51 #include <deque>
52 #include <iterator>
53 #include <vector>
54 
55 using namespace clang;
56 
//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//
60 
61 namespace {
62   class UnreachableCodeHandler : public reachable_code::Callback {
63     Sema &S;
64   public:
UnreachableCodeHandler(Sema & s)65     UnreachableCodeHandler(Sema &s) : S(s) {}
66 
HandleUnreachable(SourceLocation L,SourceRange R1,SourceRange R2)67     void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
68       S.Diag(L, diag::warn_unreachable) << R1 << R2;
69     }
70   };
71 }
72 
73 /// CheckUnreachable - Check for unreachable code.
CheckUnreachable(Sema & S,AnalysisDeclContext & AC)74 static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
75   UnreachableCodeHandler UC(S);
76   reachable_code::FindUnreachableCode(AC, UC);
77 }
78 
//===----------------------------------------------------------------------===//
// Check for missing return value.
//===----------------------------------------------------------------------===//
82 
/// Classifies how control may leave the end of a statement or function body.
enum ControlFlowKind {
  UnknownFallThrough,       ///< No CFG was available, so nothing is known.
  NeverFallThrough,         ///< Never falls off the end, but may return.
  MaybeFallThrough,         ///< Might or might not fall off the end.
  AlwaysFallThrough,        ///< Always falls off the end.
  NeverFallThroughOrReturn  ///< Neither falls off the end nor returns.
};
90 
91 /// CheckFallThrough - Check that we don't fall off the end of a
92 /// Statement that should return a value.
93 ///
94 /// \returns AlwaysFallThrough iff we always fall off the end of the statement,
95 /// MaybeFallThrough iff we might or might not fall off the end,
96 /// NeverFallThroughOrReturn iff we never fall off the end of the statement or
97 /// return.  We assume NeverFallThrough iff we never fall off the end of the
98 /// statement but we may return.  We assume that functions not marked noreturn
99 /// will return.
CheckFallThrough(AnalysisDeclContext & AC)100 static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
101   CFG *cfg = AC.getCFG();
102   if (cfg == 0) return UnknownFallThrough;
103 
104   // The CFG leaves in dead things, and we don't want the dead code paths to
105   // confuse us, so we mark all live things first.
106   llvm::BitVector live(cfg->getNumBlockIDs());
107   unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
108                                                           live);
109 
110   bool AddEHEdges = AC.getAddEHEdges();
111   if (!AddEHEdges && count != cfg->getNumBlockIDs())
112     // When there are things remaining dead, and we didn't add EH edges
113     // from CallExprs to the catch clauses, we have to go back and
114     // mark them as live.
115     for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
116       CFGBlock &b = **I;
117       if (!live[b.getBlockID()]) {
118         if (b.pred_begin() == b.pred_end()) {
119           if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
120             // When not adding EH edges from calls, catch clauses
121             // can otherwise seem dead.  Avoid noting them as dead.
122             count += reachable_code::ScanReachableFromBlock(&b, live);
123           continue;
124         }
125       }
126     }
127 
128   // Now we know what is live, we check the live precessors of the exit block
129   // and look for fall through paths, being careful to ignore normal returns,
130   // and exceptional paths.
131   bool HasLiveReturn = false;
132   bool HasFakeEdge = false;
133   bool HasPlainEdge = false;
134   bool HasAbnormalEdge = false;
135 
136   // Ignore default cases that aren't likely to be reachable because all
137   // enums in a switch(X) have explicit case statements.
138   CFGBlock::FilterOptions FO;
139   FO.IgnoreDefaultsWithCoveredEnums = 1;
140 
141   for (CFGBlock::filtered_pred_iterator
142 	 I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
143     const CFGBlock& B = **I;
144     if (!live[B.getBlockID()])
145       continue;
146 
147     // Skip blocks which contain an element marked as no-return. They don't
148     // represent actually viable edges into the exit block, so mark them as
149     // abnormal.
150     if (B.hasNoReturnElement()) {
151       HasAbnormalEdge = true;
152       continue;
153     }
154 
155     // Destructors can appear after the 'return' in the CFG.  This is
156     // normal.  We need to look pass the destructors for the return
157     // statement (if it exists).
158     CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
159 
160     for ( ; ri != re ; ++ri)
161       if (ri->getAs<CFGStmt>())
162         break;
163 
164     // No more CFGElements in the block?
165     if (ri == re) {
166       if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
167         HasAbnormalEdge = true;
168         continue;
169       }
170       // A labeled empty statement, or the entry block...
171       HasPlainEdge = true;
172       continue;
173     }
174 
175     CFGStmt CS = ri->castAs<CFGStmt>();
176     const Stmt *S = CS.getStmt();
177     if (isa<ReturnStmt>(S)) {
178       HasLiveReturn = true;
179       continue;
180     }
181     if (isa<ObjCAtThrowStmt>(S)) {
182       HasFakeEdge = true;
183       continue;
184     }
185     if (isa<CXXThrowExpr>(S)) {
186       HasFakeEdge = true;
187       continue;
188     }
189     if (isa<MSAsmStmt>(S)) {
190       // TODO: Verify this is correct.
191       HasFakeEdge = true;
192       HasLiveReturn = true;
193       continue;
194     }
195     if (isa<CXXTryStmt>(S)) {
196       HasAbnormalEdge = true;
197       continue;
198     }
199     if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
200         == B.succ_end()) {
201       HasAbnormalEdge = true;
202       continue;
203     }
204 
205     HasPlainEdge = true;
206   }
207   if (!HasPlainEdge) {
208     if (HasLiveReturn)
209       return NeverFallThrough;
210     return NeverFallThroughOrReturn;
211   }
212   if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
213     return MaybeFallThrough;
214   // This says AlwaysFallThrough for calls to functions that are not marked
215   // noreturn, that don't return.  If people would like this warning to be more
216   // accurate, such functions should be marked as noreturn.
217   return AlwaysFallThrough;
218 }
219 
220 namespace {
221 
222 struct CheckFallThroughDiagnostics {
223   unsigned diag_MaybeFallThrough_HasNoReturn;
224   unsigned diag_MaybeFallThrough_ReturnsNonVoid;
225   unsigned diag_AlwaysFallThrough_HasNoReturn;
226   unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
227   unsigned diag_NeverFallThroughOrReturn;
228   enum { Function, Block, Lambda } funMode;
229   SourceLocation FuncLoc;
230 
MakeForFunction__anon8cca75670211::CheckFallThroughDiagnostics231   static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
232     CheckFallThroughDiagnostics D;
233     D.FuncLoc = Func->getLocation();
234     D.diag_MaybeFallThrough_HasNoReturn =
235       diag::warn_falloff_noreturn_function;
236     D.diag_MaybeFallThrough_ReturnsNonVoid =
237       diag::warn_maybe_falloff_nonvoid_function;
238     D.diag_AlwaysFallThrough_HasNoReturn =
239       diag::warn_falloff_noreturn_function;
240     D.diag_AlwaysFallThrough_ReturnsNonVoid =
241       diag::warn_falloff_nonvoid_function;
242 
243     // Don't suggest that virtual functions be marked "noreturn", since they
244     // might be overridden by non-noreturn functions.
245     bool isVirtualMethod = false;
246     if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
247       isVirtualMethod = Method->isVirtual();
248 
249     // Don't suggest that template instantiations be marked "noreturn"
250     bool isTemplateInstantiation = false;
251     if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
252       isTemplateInstantiation = Function->isTemplateInstantiation();
253 
254     if (!isVirtualMethod && !isTemplateInstantiation)
255       D.diag_NeverFallThroughOrReturn =
256         diag::warn_suggest_noreturn_function;
257     else
258       D.diag_NeverFallThroughOrReturn = 0;
259 
260     D.funMode = Function;
261     return D;
262   }
263 
MakeForBlock__anon8cca75670211::CheckFallThroughDiagnostics264   static CheckFallThroughDiagnostics MakeForBlock() {
265     CheckFallThroughDiagnostics D;
266     D.diag_MaybeFallThrough_HasNoReturn =
267       diag::err_noreturn_block_has_return_expr;
268     D.diag_MaybeFallThrough_ReturnsNonVoid =
269       diag::err_maybe_falloff_nonvoid_block;
270     D.diag_AlwaysFallThrough_HasNoReturn =
271       diag::err_noreturn_block_has_return_expr;
272     D.diag_AlwaysFallThrough_ReturnsNonVoid =
273       diag::err_falloff_nonvoid_block;
274     D.diag_NeverFallThroughOrReturn =
275       diag::warn_suggest_noreturn_block;
276     D.funMode = Block;
277     return D;
278   }
279 
MakeForLambda__anon8cca75670211::CheckFallThroughDiagnostics280   static CheckFallThroughDiagnostics MakeForLambda() {
281     CheckFallThroughDiagnostics D;
282     D.diag_MaybeFallThrough_HasNoReturn =
283       diag::err_noreturn_lambda_has_return_expr;
284     D.diag_MaybeFallThrough_ReturnsNonVoid =
285       diag::warn_maybe_falloff_nonvoid_lambda;
286     D.diag_AlwaysFallThrough_HasNoReturn =
287       diag::err_noreturn_lambda_has_return_expr;
288     D.diag_AlwaysFallThrough_ReturnsNonVoid =
289       diag::warn_falloff_nonvoid_lambda;
290     D.diag_NeverFallThroughOrReturn = 0;
291     D.funMode = Lambda;
292     return D;
293   }
294 
checkDiagnostics__anon8cca75670211::CheckFallThroughDiagnostics295   bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
296                         bool HasNoReturn) const {
297     if (funMode == Function) {
298       return (ReturnsVoid ||
299               D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
300                                    FuncLoc) == DiagnosticsEngine::Ignored)
301         && (!HasNoReturn ||
302             D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
303                                  FuncLoc) == DiagnosticsEngine::Ignored)
304         && (!ReturnsVoid ||
305             D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
306               == DiagnosticsEngine::Ignored);
307     }
308 
309     // For blocks / lambdas.
310     return ReturnsVoid && !HasNoReturn
311             && ((funMode == Lambda) ||
312                 D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
313                   == DiagnosticsEngine::Ignored);
314   }
315 };
316 
317 }
318 
319 /// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
320 /// function that should return a value.  Check that we don't fall off the end
321 /// of a noreturn function.  We assume that functions and blocks not marked
322 /// noreturn will return.
CheckFallThroughForBody(Sema & S,const Decl * D,const Stmt * Body,const BlockExpr * blkExpr,const CheckFallThroughDiagnostics & CD,AnalysisDeclContext & AC)323 static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
324                                     const BlockExpr *blkExpr,
325                                     const CheckFallThroughDiagnostics& CD,
326                                     AnalysisDeclContext &AC) {
327 
328   bool ReturnsVoid = false;
329   bool HasNoReturn = false;
330 
331   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
332     ReturnsVoid = FD->getResultType()->isVoidType();
333     HasNoReturn = FD->isNoReturn();
334   }
335   else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
336     ReturnsVoid = MD->getResultType()->isVoidType();
337     HasNoReturn = MD->hasAttr<NoReturnAttr>();
338   }
339   else if (isa<BlockDecl>(D)) {
340     QualType BlockTy = blkExpr->getType();
341     if (const FunctionType *FT =
342           BlockTy->getPointeeType()->getAs<FunctionType>()) {
343       if (FT->getResultType()->isVoidType())
344         ReturnsVoid = true;
345       if (FT->getNoReturnAttr())
346         HasNoReturn = true;
347     }
348   }
349 
350   DiagnosticsEngine &Diags = S.getDiagnostics();
351 
352   // Short circuit for compilation speed.
353   if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
354       return;
355 
356   // FIXME: Function try block
357   if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
358     switch (CheckFallThrough(AC)) {
359       case UnknownFallThrough:
360         break;
361 
362       case MaybeFallThrough:
363         if (HasNoReturn)
364           S.Diag(Compound->getRBracLoc(),
365                  CD.diag_MaybeFallThrough_HasNoReturn);
366         else if (!ReturnsVoid)
367           S.Diag(Compound->getRBracLoc(),
368                  CD.diag_MaybeFallThrough_ReturnsNonVoid);
369         break;
370       case AlwaysFallThrough:
371         if (HasNoReturn)
372           S.Diag(Compound->getRBracLoc(),
373                  CD.diag_AlwaysFallThrough_HasNoReturn);
374         else if (!ReturnsVoid)
375           S.Diag(Compound->getRBracLoc(),
376                  CD.diag_AlwaysFallThrough_ReturnsNonVoid);
377         break;
378       case NeverFallThroughOrReturn:
379         if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
380           if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
381             S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
382               << 0 << FD;
383           } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
384             S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
385               << 1 << MD;
386           } else {
387             S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
388           }
389         }
390         break;
391       case NeverFallThrough:
392         break;
393     }
394   }
395 }
396 
//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//
400 
401 namespace {
402 /// ContainsReference - A visitor class to search for references to
403 /// a particular declaration (the needle) within any evaluated component of an
404 /// expression (recursively).
405 class ContainsReference : public EvaluatedExprVisitor<ContainsReference> {
406   bool FoundReference;
407   const DeclRefExpr *Needle;
408 
409 public:
ContainsReference(ASTContext & Context,const DeclRefExpr * Needle)410   ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
411     : EvaluatedExprVisitor<ContainsReference>(Context),
412       FoundReference(false), Needle(Needle) {}
413 
VisitExpr(Expr * E)414   void VisitExpr(Expr *E) {
415     // Stop evaluating if we already have a reference.
416     if (FoundReference)
417       return;
418 
419     EvaluatedExprVisitor<ContainsReference>::VisitExpr(E);
420   }
421 
VisitDeclRefExpr(DeclRefExpr * E)422   void VisitDeclRefExpr(DeclRefExpr *E) {
423     if (E == Needle)
424       FoundReference = true;
425     else
426       EvaluatedExprVisitor<ContainsReference>::VisitDeclRefExpr(E);
427   }
428 
doesContainReference() const429   bool doesContainReference() const { return FoundReference; }
430 };
431 }
432 
SuggestInitializationFixit(Sema & S,const VarDecl * VD)433 static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
434   QualType VariableTy = VD->getType().getCanonicalType();
435   if (VariableTy->isBlockPointerType() &&
436       !VD->hasAttr<BlocksAttr>()) {
437     S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization) << VD->getDeclName()
438     << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
439     return true;
440   }
441 
442   // Don't issue a fixit if there is already an initializer.
443   if (VD->getInit())
444     return false;
445 
446   // Suggest possible initialization (if any).
447   std::string Init = S.getFixItZeroInitializerForType(VariableTy);
448   if (Init.empty())
449     return false;
450 
451   // Don't suggest a fixit inside macros.
452   if (VD->getLocEnd().isMacroID())
453     return false;
454 
455   SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
456 
457   S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
458     << FixItHint::CreateInsertion(Loc, Init);
459   return true;
460 }
461 
462 /// Create a fixit to remove an if-like statement, on the assumption that its
463 /// condition is CondVal.
CreateIfFixit(Sema & S,const Stmt * If,const Stmt * Then,const Stmt * Else,bool CondVal,FixItHint & Fixit1,FixItHint & Fixit2)464 static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
465                           const Stmt *Else, bool CondVal,
466                           FixItHint &Fixit1, FixItHint &Fixit2) {
467   if (CondVal) {
468     // If condition is always true, remove all but the 'then'.
469     Fixit1 = FixItHint::CreateRemoval(
470         CharSourceRange::getCharRange(If->getLocStart(),
471                                       Then->getLocStart()));
472     if (Else) {
473       SourceLocation ElseKwLoc = Lexer::getLocForEndOfToken(
474           Then->getLocEnd(), 0, S.getSourceManager(), S.getLangOpts());
475       Fixit2 = FixItHint::CreateRemoval(
476           SourceRange(ElseKwLoc, Else->getLocEnd()));
477     }
478   } else {
479     // If condition is always false, remove all but the 'else'.
480     if (Else)
481       Fixit1 = FixItHint::CreateRemoval(
482           CharSourceRange::getCharRange(If->getLocStart(),
483                                         Else->getLocStart()));
484     else
485       Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
486   }
487 }
488 
489 /// DiagUninitUse -- Helper function to produce a diagnostic for an
490 /// uninitialized use of a variable.
DiagUninitUse(Sema & S,const VarDecl * VD,const UninitUse & Use,bool IsCapturedByBlock)491 static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
492                           bool IsCapturedByBlock) {
493   bool Diagnosed = false;
494 
495   // Diagnose each branch which leads to a sometimes-uninitialized use.
496   for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
497        I != E; ++I) {
498     assert(Use.getKind() == UninitUse::Sometimes);
499 
500     const Expr *User = Use.getUser();
501     const Stmt *Term = I->Terminator;
502 
503     // Information used when building the diagnostic.
504     unsigned DiagKind;
505     StringRef Str;
506     SourceRange Range;
507 
508     // FixIts to suppress the diagnostic by removing the dead condition.
509     // For all binary terminators, branch 0 is taken if the condition is true,
510     // and branch 1 is taken if the condition is false.
511     int RemoveDiagKind = -1;
512     const char *FixitStr =
513         S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
514                                   : (I->Output ? "1" : "0");
515     FixItHint Fixit1, Fixit2;
516 
517     switch (Term->getStmtClass()) {
518     default:
519       // Don't know how to report this. Just fall back to 'may be used
520       // uninitialized'. This happens for range-based for, which the user
521       // can't explicitly fix.
522       // FIXME: This also happens if the first use of a variable is always
523       // uninitialized, eg "for (int n; n < 10; ++n)". We should report that
524       // with the 'is uninitialized' diagnostic.
525       continue;
526 
527     // "condition is true / condition is false".
528     case Stmt::IfStmtClass: {
529       const IfStmt *IS = cast<IfStmt>(Term);
530       DiagKind = 0;
531       Str = "if";
532       Range = IS->getCond()->getSourceRange();
533       RemoveDiagKind = 0;
534       CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
535                     I->Output, Fixit1, Fixit2);
536       break;
537     }
538     case Stmt::ConditionalOperatorClass: {
539       const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
540       DiagKind = 0;
541       Str = "?:";
542       Range = CO->getCond()->getSourceRange();
543       RemoveDiagKind = 0;
544       CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
545                     I->Output, Fixit1, Fixit2);
546       break;
547     }
548     case Stmt::BinaryOperatorClass: {
549       const BinaryOperator *BO = cast<BinaryOperator>(Term);
550       if (!BO->isLogicalOp())
551         continue;
552       DiagKind = 0;
553       Str = BO->getOpcodeStr();
554       Range = BO->getLHS()->getSourceRange();
555       RemoveDiagKind = 0;
556       if ((BO->getOpcode() == BO_LAnd && I->Output) ||
557           (BO->getOpcode() == BO_LOr && !I->Output))
558         // true && y -> y, false || y -> y.
559         Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(),
560                                                       BO->getOperatorLoc()));
561       else
562         // false && y -> false, true || y -> true.
563         Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
564       break;
565     }
566 
567     // "loop is entered / loop is exited".
568     case Stmt::WhileStmtClass:
569       DiagKind = 1;
570       Str = "while";
571       Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
572       RemoveDiagKind = 1;
573       Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
574       break;
575     case Stmt::ForStmtClass:
576       DiagKind = 1;
577       Str = "for";
578       Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
579       RemoveDiagKind = 1;
580       if (I->Output)
581         Fixit1 = FixItHint::CreateRemoval(Range);
582       else
583         Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
584       break;
585 
586     // "condition is true / loop is exited".
587     case Stmt::DoStmtClass:
588       DiagKind = 2;
589       Str = "do";
590       Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
591       RemoveDiagKind = 1;
592       Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
593       break;
594 
595     // "switch case is taken".
596     case Stmt::CaseStmtClass:
597       DiagKind = 3;
598       Str = "case";
599       Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
600       break;
601     case Stmt::DefaultStmtClass:
602       DiagKind = 3;
603       Str = "default";
604       Range = cast<DefaultStmt>(Term)->getDefaultLoc();
605       break;
606     }
607 
608     S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
609       << VD->getDeclName() << IsCapturedByBlock << DiagKind
610       << Str << I->Output << Range;
611     S.Diag(User->getLocStart(), diag::note_uninit_var_use)
612       << IsCapturedByBlock << User->getSourceRange();
613     if (RemoveDiagKind != -1)
614       S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
615         << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;
616 
617     Diagnosed = true;
618   }
619 
620   if (!Diagnosed)
621     S.Diag(Use.getUser()->getLocStart(),
622            Use.getKind() == UninitUse::Always ? diag::warn_uninit_var
623                                               : diag::warn_maybe_uninit_var)
624         << VD->getDeclName() << IsCapturedByBlock
625         << Use.getUser()->getSourceRange();
626 }
627 
628 /// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
629 /// uninitialized variable. This manages the different forms of diagnostic
630 /// emitted for particular types of uses. Returns true if the use was diagnosed
631 /// as a warning. If a particular use is one we omit warnings for, returns
632 /// false.
DiagnoseUninitializedUse(Sema & S,const VarDecl * VD,const UninitUse & Use,bool alwaysReportSelfInit=false)633 static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
634                                      const UninitUse &Use,
635                                      bool alwaysReportSelfInit = false) {
636 
637   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
638     // Inspect the initializer of the variable declaration which is
639     // being referenced prior to its initialization. We emit
640     // specialized diagnostics for self-initialization, and we
641     // specifically avoid warning about self references which take the
642     // form of:
643     //
644     //   int x = x;
645     //
646     // This is used to indicate to GCC that 'x' is intentionally left
647     // uninitialized. Proven code paths which access 'x' in
648     // an uninitialized state after this will still warn.
649     if (const Expr *Initializer = VD->getInit()) {
650       if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
651         return false;
652 
653       ContainsReference CR(S.Context, DRE);
654       CR.Visit(const_cast<Expr*>(Initializer));
655       if (CR.doesContainReference()) {
656         S.Diag(DRE->getLocStart(),
657                diag::warn_uninit_self_reference_in_init)
658           << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
659         return true;
660       }
661     }
662 
663     DiagUninitUse(S, VD, Use, false);
664   } else {
665     const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
666     if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
667       S.Diag(BE->getLocStart(),
668              diag::warn_uninit_byref_blockvar_captured_by_block)
669         << VD->getDeclName();
670     else
671       DiagUninitUse(S, VD, Use, true);
672   }
673 
674   // Report where the variable was declared when the use wasn't within
675   // the initializer of that declaration & we didn't already suggest
676   // an initialization fixit.
677   if (!SuggestInitializationFixit(S, VD))
678     S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
679       << VD->getDeclName();
680 
681   return true;
682 }
683 
684 namespace {
685   class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
686   public:
FallthroughMapper(Sema & S)687     FallthroughMapper(Sema &S)
688       : FoundSwitchStatements(false),
689         S(S) {
690     }
691 
foundSwitchStatements() const692     bool foundSwitchStatements() const { return FoundSwitchStatements; }
693 
markFallthroughVisited(const AttributedStmt * Stmt)694     void markFallthroughVisited(const AttributedStmt *Stmt) {
695       bool Found = FallthroughStmts.erase(Stmt);
696       assert(Found);
697       (void)Found;
698     }
699 
700     typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;
701 
getFallthroughStmts() const702     const AttrStmts &getFallthroughStmts() const {
703       return FallthroughStmts;
704     }
705 
fillReachableBlocks(CFG * Cfg)706     void fillReachableBlocks(CFG *Cfg) {
707       assert(ReachableBlocks.empty() && "ReachableBlocks already filled");
708       std::deque<const CFGBlock *> BlockQueue;
709 
710       ReachableBlocks.insert(&Cfg->getEntry());
711       BlockQueue.push_back(&Cfg->getEntry());
712       // Mark all case blocks reachable to avoid problems with switching on
713       // constants, covered enums, etc.
714       // These blocks can contain fall-through annotations, and we don't want to
715       // issue a warn_fallthrough_attr_unreachable for them.
716       for (CFG::iterator I = Cfg->begin(), E = Cfg->end(); I != E; ++I) {
717         const CFGBlock *B = *I;
718         const Stmt *L = B->getLabel();
719         if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B))
720           BlockQueue.push_back(B);
721       }
722 
723       while (!BlockQueue.empty()) {
724         const CFGBlock *P = BlockQueue.front();
725         BlockQueue.pop_front();
726         for (CFGBlock::const_succ_iterator I = P->succ_begin(),
727                                            E = P->succ_end();
728              I != E; ++I) {
729           if (*I && ReachableBlocks.insert(*I))
730             BlockQueue.push_back(*I);
731         }
732       }
733     }
734 
    // Determines whether control can fall through into the case-label block
    // B without an annotation.  Walks B's CFG predecessors: predecessors that
    // end in a switch terminator, or that are empty label-only blocks for the
    // same switch, are fine.  Predecessors ending in a [[clang::fallthrough]]
    // statement are counted in AnnotatedCnt; all other fall-through edges are
    // counted as unannotated.  Returns true if at least one unannotated
    // fall-through edge was found.
    bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) {
      assert(!ReachableBlocks.empty() && "ReachableBlocks empty");

      int UnannotatedCnt = 0;
      AnnotatedCnt = 0;

      // Worklist of predecessors still to classify.
      std::deque<const CFGBlock*> BlockQueue;

      std::copy(B.pred_begin(), B.pred_end(), std::back_inserter(BlockQueue));

      while (!BlockQueue.empty()) {
        const CFGBlock *P = BlockQueue.front();
        BlockQueue.pop_front();

        const Stmt *Term = P->getTerminator();
        if (Term && isa<SwitchStmt>(Term))
          continue; // Switch statement, good.

        const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
        if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
          continue; // Previous case label has no statements, good.

        const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
        if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
          continue; // Case label is preceded with a normal label, good.

        if (!ReachableBlocks.count(P)) {
          // The predecessor is itself unreachable.  Any fallthrough
          // annotation in it can never take effect, so diagnose it here
          // (scanning backwards for the last statement of the block).
          for (CFGBlock::const_reverse_iterator ElemIt = P->rbegin(),
                                                ElemEnd = P->rend();
               ElemIt != ElemEnd; ++ElemIt) {
            if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>()) {
              if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
                S.Diag(AS->getLocStart(),
                       diag::warn_fallthrough_attr_unreachable);
                markFallthroughVisited(AS);
                ++AnnotatedCnt;
                break;
              }
              // Don't care about other unreachable statements.
            }
          }
          // If there are no unreachable statements, this may be a special
          // case in CFG:
          // case X: {
          //    A a;  // A has a destructor.
          //    break;
          // }
          // // <<<< This place is represented by a 'hanging' CFG block.
          // case Y:
          continue;
        }

        const Stmt *LastStmt = getLastStmt(*P);
        if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
          markFallthroughVisited(AS);
          ++AnnotatedCnt;
          continue; // Fallthrough annotation, good.
        }

        if (!LastStmt) { // This block contains no executable statements.
          // Traverse its predecessors.
          std::copy(P->pred_begin(), P->pred_end(),
                    std::back_inserter(BlockQueue));
          continue;
        }

        ++UnannotatedCnt;
      }
      return !!UnannotatedCnt;
    }
805 
806     // RecursiveASTVisitor setup.
shouldWalkTypesOfTypeLocs() const807     bool shouldWalkTypesOfTypeLocs() const { return false; }
808 
VisitAttributedStmt(AttributedStmt * S)809     bool VisitAttributedStmt(AttributedStmt *S) {
810       if (asFallThroughAttr(S))
811         FallthroughStmts.insert(S);
812       return true;
813     }
814 
VisitSwitchStmt(SwitchStmt * S)815     bool VisitSwitchStmt(SwitchStmt *S) {
816       FoundSwitchStatements = true;
817       return true;
818     }
819 
820     // We don't want to traverse local type declarations. We analyze their
821     // methods separately.
TraverseDecl(Decl * D)822     bool TraverseDecl(Decl *D) { return true; }
823 
824   private:
825 
asFallThroughAttr(const Stmt * S)826     static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
827       if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
828         if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
829           return AS;
830       }
831       return 0;
832     }
833 
    // Returns the "last statement" of block B: its terminator if present,
    // otherwise the last CFGStmt element.  If neither exists, falls back to
    // the sub-statement of B's case label to compensate for statements that
    // CFGBuilder drops (see the workaround comment below).  Returns null
    // when the block holds no executable statements at all.
    static const Stmt *getLastStmt(const CFGBlock &B) {
      if (const Stmt *Term = B.getTerminator())
        return Term;
      // Scan elements back-to-front for the last statement in the block.
      for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
                                            ElemEnd = B.rend();
                                            ElemIt != ElemEnd; ++ElemIt) {
        if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>())
          return CS->getStmt();
      }
      // Workaround to detect a statement thrown out by CFGBuilder:
      //   case X: {} case Y:
      //   case X: ; case Y:
      if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
        if (!isa<SwitchCase>(SW->getSubStmt()))
          return SW->getSubStmt();

      return 0;
    }
852 
    // True if the traversed body contains at least one SwitchStmt.
    bool FoundSwitchStatements;
    // Fallthrough annotations collected during traversal.  Entries still
    // present when the analysis finishes are reported as invalidly placed
    // by DiagnoseSwitchLabelsFallthrough().
    AttrStmts FallthroughStmts;
    Sema &S;
    // CFG blocks reachable from the entry; populated by
    // fillReachableBlocks() and consulted by checkFallThroughIntoBlock().
    llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
  };
}
859 
// Implements -Wimplicit-fallthrough: warns on each case label that control
// can fall into without a [[clang::fallthrough]] annotation, offering
// fix-its, and warns on annotations that do not mark an actual fall-through.
static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
                                            bool PerFunction) {
  // Only perform this analysis when using C++11.  There is no good workflow
  // for this warning when not using C++11.  There is no good way to silence
  // the warning (no attribute is available) unless we are using C++11's support
  // for generalized attributes.  Once could use pragmas to silence the warning,
  // but as a general solution that is gross and not in the spirit of this
  // warning.
  //
  // NOTE: This an intermediate solution.  There are on-going discussions on
  // how to properly support this warning outside of C++11 with an annotation.
  if (!AC.getASTContext().getLangOpts().CPlusPlus11)
    return;

  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  // Nothing to do if the body has no switch statements at all.
  if (!FM.foundSwitchStatements())
    return;

  // In per-function mode, only run when the function actually uses the
  // annotation somewhere.
  if (PerFunction && FM.getFallthroughStmts().empty())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  FM.fillReachableBlocks(Cfg);

  // Visit every block that starts with a case/default label and check
  // whether control can fall into it unannotated.
  for (CFG::reverse_iterator I = Cfg->rbegin(), E = Cfg->rend(); I != E; ++I) {
    const CFGBlock *B = *I;
    const Stmt *Label = B->getLabel();

    if (!Label || !isa<SwitchCase>(Label))
      continue;

    int AnnotatedCnt;

    if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt))
      continue;

    S.Diag(Label->getLocStart(),
        PerFunction ? diag::warn_unannotated_fallthrough_per_function
                    : diag::warn_unannotated_fallthrough);

    if (!AnnotatedCnt) {
      SourceLocation L = Label->getLocStart();
      // Fix-its cannot be attached inside macro expansions.
      if (L.isMacroID())
        continue;
      if (S.getLangOpts().CPlusPlus11) {
        const Stmt *Term = B->getTerminator();
        // Skip empty cases.
        while (B->empty() && !Term && B->succ_size() == 1) {
          B = *B->succ_begin();
          Term = B->getTerminator();
        }
        // Suggest inserting the annotation unless the case is empty and
        // already ends in a break.  Prefer a macro the user has defined
        // that spells out to [[clang::fallthrough]], if one exists.
        if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
          Preprocessor &PP = S.getPreprocessor();
          TokenValue Tokens[] = {
            tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
            tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
            tok::r_square, tok::r_square
          };
          StringRef AnnotationSpelling = "[[clang::fallthrough]]";
          StringRef MacroName = PP.getLastMacroWithSpelling(L, Tokens);
          if (!MacroName.empty())
            AnnotationSpelling = MacroName;
          SmallString<64> TextToInsert(AnnotationSpelling);
          TextToInsert += "; ";
          S.Diag(L, diag::note_insert_fallthrough_fixit) <<
              AnnotationSpelling <<
              FixItHint::CreateInsertion(L, TextToInsert);
        }
      }
      S.Diag(L, diag::note_insert_break_fixit) <<
        FixItHint::CreateInsertion(L, "break; ");
    }
  }

  // Any annotation never matched to a fall-through edge is misplaced.
  const FallthroughMapper::AttrStmts &Fallthroughs = FM.getFallthroughStmts();
  for (FallthroughMapper::AttrStmts::const_iterator I = Fallthroughs.begin(),
                                                    E = Fallthroughs.end();
                                                    I != E; ++I) {
    S.Diag((*I)->getLocStart(), diag::warn_fallthrough_attr_invalid_placement);
  }

}
948 
949 namespace {
950 typedef std::pair<const Stmt *,
951                   sema::FunctionScopeInfo::WeakObjectUseMap::const_iterator>
952         StmtUsesPair;
953 
954 class StmtUseSorter {
955   const SourceManager &SM;
956 
957 public:
StmtUseSorter(const SourceManager & SM)958   explicit StmtUseSorter(const SourceManager &SM) : SM(SM) { }
959 
operator ()(const StmtUsesPair & LHS,const StmtUsesPair & RHS)960   bool operator()(const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
961     return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
962                                         RHS.first->getLocStart());
963   }
964 };
965 }
966 
isInLoop(const ASTContext & Ctx,const ParentMap & PM,const Stmt * S)967 static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
968                      const Stmt *S) {
969   assert(S);
970 
971   do {
972     switch (S->getStmtClass()) {
973     case Stmt::ForStmtClass:
974     case Stmt::WhileStmtClass:
975     case Stmt::CXXForRangeStmtClass:
976     case Stmt::ObjCForCollectionStmtClass:
977       return true;
978     case Stmt::DoStmtClass: {
979       const Expr *Cond = cast<DoStmt>(S)->getCond();
980       llvm::APSInt Val;
981       if (!Cond->EvaluateAsInt(Val, Ctx))
982         return true;
983       return Val.getBoolValue();
984     }
985     default:
986       break;
987     }
988   } while ((S = PM.getParent(S)));
989 
990   return false;
991 }
992 
993 
/// Emits warn_arc_repeated_use_of_weak (or the "possible" variant when the
/// access profile is inexact) for each weak object read more than once in
/// \p CurFn.  The warning is placed at the first unsafe read; every other
/// access is reported as a note.  A single read followed only by writes is
/// exempt unless it sits inside a loop, and even then reads based on a
/// non-parameter local variable are exempt, since locals are commonly
/// reassigned inside loops.
static void diagnoseRepeatedUseOfWeak(Sema &S,
                                      const sema::FunctionScopeInfo *CurFn,
                                      const Decl *D,
                                      const ParentMap &PM) {
  typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
  typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
  typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;

  ASTContext &Ctx = S.getASTContext();

  const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();

  // Extract all weak objects that are referenced more than once.
  SmallVector<StmtUsesPair, 8> UsesByStmt;
  for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
       I != E; ++I) {
    const WeakUseVector &Uses = I->second;

    // Find the first read of the weak object.
    WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
    for ( ; UI != UE; ++UI) {
      if (UI->isUnsafe())
        break;
    }

    // If there were only writes to this object, don't warn.
    if (UI == UE)
      continue;

    // If there was only one read, followed by any number of writes, and the
    // read is not within a loop, don't warn. Additionally, don't warn in a
    // loop if the base object is a local variable -- local variables are often
    // changed in loops.
    if (UI == Uses.begin()) {
      WeakUseVector::const_iterator UI2 = UI;
      for (++UI2; UI2 != UE; ++UI2)
        if (UI2->isUnsafe())
          break;

      if (UI2 == UE) {
        // This is the only unsafe read of the object.
        if (!isInLoop(Ctx, PM, UI->getUseExpr()))
          continue;

        const WeakObjectProfileTy &Profile = I->first;
        if (!Profile.isExactProfile())
          continue;

        const NamedDecl *Base = Profile.getBase();
        if (!Base)
          Base = Profile.getProperty();
        assert(Base && "A profile always has a base or property.");

        // In-loop exemption for non-parameter locals (see function comment).
        if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
          if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
            continue;
      }
    }

    UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
  }

  if (UsesByStmt.empty())
    return;

  // Sort by first use so that we emit the warnings in a deterministic order.
  std::sort(UsesByStmt.begin(), UsesByStmt.end(),
            StmtUseSorter(S.getSourceManager()));

  // Classify the current code body for better warning text.
  // This enum should stay in sync with the cases in
  // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
  // FIXME: Should we use a common classification enum and the same set of
  // possibilities all throughout Sema?
  enum {
    Function,
    Method,
    Block,
    Lambda
  } FunctionKind;

  if (isa<sema::BlockScopeInfo>(CurFn))
    FunctionKind = Block;
  else if (isa<sema::LambdaScopeInfo>(CurFn))
    FunctionKind = Lambda;
  else if (isa<ObjCMethodDecl>(D))
    FunctionKind = Method;
  else
    FunctionKind = Function;

  // Iterate through the sorted problems and emit warnings for each.
  for (SmallVectorImpl<StmtUsesPair>::const_iterator I = UsesByStmt.begin(),
                                                     E = UsesByStmt.end();
       I != E; ++I) {
    const Stmt *FirstRead = I->first;
    const WeakObjectProfileTy &Key = I->second->first;
    const WeakUseVector &Uses = I->second->second;

    // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
    // may not contain enough information to determine that these are different
    // properties. We can only be 100% sure of a repeated use in certain cases,
    // and we adjust the diagnostic kind accordingly so that the less certain
    // case can be turned off if it is too noisy.
    unsigned DiagKind;
    if (Key.isExactProfile())
      DiagKind = diag::warn_arc_repeated_use_of_weak;
    else
      DiagKind = diag::warn_arc_possible_repeated_use_of_weak;

    // Classify the weak object being accessed for better warning text.
    // This enum should stay in sync with the cases in
    // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
    enum {
      Variable,
      Property,
      ImplicitProperty,
      Ivar
    } ObjectKind;

    const NamedDecl *D = Key.getProperty();
    if (isa<VarDecl>(D))
      ObjectKind = Variable;
    else if (isa<ObjCPropertyDecl>(D))
      ObjectKind = Property;
    else if (isa<ObjCMethodDecl>(D))
      ObjectKind = ImplicitProperty;
    else if (isa<ObjCIvarDecl>(D))
      ObjectKind = Ivar;
    else
      llvm_unreachable("Unexpected weak object kind!");

    // Show the first time the object was read.
    S.Diag(FirstRead->getLocStart(), DiagKind)
      << int(ObjectKind) << D << int(FunctionKind)
      << FirstRead->getSourceRange();

    // Print all the other accesses as notes.
    for (WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
         UI != UE; ++UI) {
      if (UI->getUseExpr() == FirstRead)
        continue;
      S.Diag(UI->getUseExpr()->getLocStart(),
             diag::note_arc_weak_also_accessed_here)
        << UI->getUseExpr()->getSourceRange();
    }
  }
}
1140 
1141 
1142 namespace {
1143 struct SLocSort {
operator ()__anon8cca75670911::SLocSort1144   bool operator()(const UninitUse &a, const UninitUse &b) {
1145     // Prefer a more confident report over a less confident one.
1146     if (a.getKind() != b.getKind())
1147       return a.getKind() > b.getKind();
1148     SourceLocation aLoc = a.getUser()->getLocStart();
1149     SourceLocation bLoc = b.getUser()->getLocStart();
1150     return aLoc.getRawEncoding() < bLoc.getRawEncoding();
1151   }
1152 };
1153 
1154 class UninitValsDiagReporter : public UninitVariablesHandler {
1155   Sema &S;
1156   typedef SmallVector<UninitUse, 2> UsesVec;
1157   typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType;
1158   // Prefer using MapVector to DenseMap, so that iteration order will be
1159   // the same as insertion order. This is needed to obtain a deterministic
1160   // order of diagnostics when calling flushDiagnostics().
1161   typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
1162   UsesMap *uses;
1163 
1164 public:
UninitValsDiagReporter(Sema & S)1165   UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
~UninitValsDiagReporter()1166   ~UninitValsDiagReporter() {
1167     flushDiagnostics();
1168   }
1169 
getUses(const VarDecl * vd)1170   MappedType &getUses(const VarDecl *vd) {
1171     if (!uses)
1172       uses = new UsesMap();
1173 
1174     MappedType &V = (*uses)[vd];
1175     if (!V.getPointer())
1176       V.setPointer(new UsesVec());
1177 
1178     return V;
1179   }
1180 
handleUseOfUninitVariable(const VarDecl * vd,const UninitUse & use)1181   void handleUseOfUninitVariable(const VarDecl *vd, const UninitUse &use) {
1182     getUses(vd).getPointer()->push_back(use);
1183   }
1184 
handleSelfInit(const VarDecl * vd)1185   void handleSelfInit(const VarDecl *vd) {
1186     getUses(vd).setInt(true);
1187   }
1188 
flushDiagnostics()1189   void flushDiagnostics() {
1190     if (!uses)
1191       return;
1192 
1193     for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
1194       const VarDecl *vd = i->first;
1195       const MappedType &V = i->second;
1196 
1197       UsesVec *vec = V.getPointer();
1198       bool hasSelfInit = V.getInt();
1199 
1200       // Specially handle the case where we have uses of an uninitialized
1201       // variable, but the root cause is an idiomatic self-init.  We want
1202       // to report the diagnostic at the self-init since that is the root cause.
1203       if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
1204         DiagnoseUninitializedUse(S, vd,
1205                                  UninitUse(vd->getInit()->IgnoreParenCasts(),
1206                                            /* isAlwaysUninit */ true),
1207                                  /* alwaysReportSelfInit */ true);
1208       else {
1209         // Sort the uses by their SourceLocations.  While not strictly
1210         // guaranteed to produce them in line/column order, this will provide
1211         // a stable ordering.
1212         std::sort(vec->begin(), vec->end(), SLocSort());
1213 
1214         for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
1215              ++vi) {
1216           // If we have self-init, downgrade all uses to 'may be uninitialized'.
1217           UninitUse Use = hasSelfInit ? UninitUse(vi->getUser(), false) : *vi;
1218 
1219           if (DiagnoseUninitializedUse(S, vd, Use))
1220             // Skip further diagnostics for this variable. We try to warn only
1221             // on the first point at which a variable is used uninitialized.
1222             break;
1223         }
1224       }
1225 
1226       // Release the uses vector.
1227       delete vec;
1228     }
1229     delete uses;
1230   }
1231 
1232 private:
hasAlwaysUninitializedUse(const UsesVec * vec)1233   static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
1234   for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e; ++i) {
1235     if (i->getKind() == UninitUse::Always) {
1236       return true;
1237     }
1238   }
1239   return false;
1240 }
1241 };
1242 }
1243 
1244 
1245 //===----------------------------------------------------------------------===//
1246 // -Wthread-safety
1247 //===----------------------------------------------------------------------===//
namespace clang {
namespace thread_safety {
// A buffered diagnostic: the warning itself plus any follow-on notes that
// must be emitted immediately after it.
typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

// Orders buffered diagnostics by the source location of the warning so
// they can be emitted in source order.
struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};
1264 
namespace {
// Receives callbacks from the thread-safety analysis and buffers the
// resulting diagnostics in Warnings so they can be sorted and emitted in
// source order by emitDiagnostics().
class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  // Fallback locations used when the analysis cannot provide a valid one.
  SourceLocation FunLocation, FunEndLocation;

  // Helper functions
  void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
    // Gracefully handle rare cases when the analysis can't get a more
    // precise source location.
    if (!Loc.isValid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

 public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
    : S(S), FunLocation(FL), FunEndLocation(FEL) {}

  /// \brief Emit all buffered diagnostics in order of sourcelocation.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
         I != E; ++I) {
      S.Diag(I->first.first, I->first.second);
      const OptionalNotes &Notes = I->second;
      for (unsigned NoteI = 0, NoteN = Notes.size(); NoteI != NoteN; ++NoteI)
        S.Diag(Notes[NoteI].first, Notes[NoteI].second);
    }
  }

  void handleInvalidLockExp(SourceLocation Loc) {
    PartialDiagnosticAt Warning(Loc,
                                S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }
  void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
  }

  void handleDoubleLock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_double_lock, LockName, Loc);
  }

  // Maps each lock-error kind to its diagnostic, attaching a "locked here"
  // note when the acquisition site is known.
  void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK){
    unsigned DiagID = 0;
    switch (LEK) {
      case LEK_LockedSomePredecessors:
        DiagID = diag::warn_lock_some_predecessors;
        break;
      case LEK_LockedSomeLoopIterations:
        DiagID = diag::warn_expecting_lock_held_on_loop;
        break;
      case LEK_LockedAtEndOfFunction:
        DiagID = diag::warn_no_unlock;
        break;
      case LEK_NotLockedAtEndOfFunction:
        DiagID = diag::warn_expecting_locked;
        break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
    if (LocLocked.isValid()) {
      PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
      Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
      return;
    }
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }


  void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
                                SourceLocation Loc2) {
    PartialDiagnosticAt Warning(
      Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
    PartialDiagnosticAt Note(
      Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
  }

  void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
                         AccessKind AK, SourceLocation Loc) {
    assert((POK == POK_VarAccess || POK == POK_VarDereference)
             && "Only works for variables");
    unsigned DiagID = POK == POK_VarAccess?
                        diag::warn_variable_requires_any_lock:
                        diag::warn_var_deref_requires_any_lock;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
      << D->getNameAsString() << getLockKindFromAccessKind(AK));
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

  // Selects the "precise" diagnostic variants (plus a near-match note)
  // when the analysis found a similarly named mutex.
  void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
                          Name LockName, LockKind LK, SourceLocation Loc,
                          Name *PossibleMatch) {
    unsigned DiagID = 0;
    if (PossibleMatch) {
      switch (POK) {
        case POK_VarAccess:
          DiagID = diag::warn_variable_requires_lock_precise;
          break;
        case POK_VarDereference:
          DiagID = diag::warn_var_deref_requires_lock_precise;
          break;
        case POK_FunctionCall:
          DiagID = diag::warn_fun_requires_lock_precise;
          break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
        << D->getNameAsString() << LockName << LK);
      PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
                               << *PossibleMatch);
      Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
    } else {
      switch (POK) {
        case POK_VarAccess:
          DiagID = diag::warn_variable_requires_lock;
          break;
        case POK_VarDereference:
          DiagID = diag::warn_var_deref_requires_lock;
          break;
        case POK_FunctionCall:
          DiagID = diag::warn_fun_requires_lock;
          break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
        << D->getNameAsString() << LockName << LK);
      Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
    }
  }

  void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
    PartialDiagnosticAt Warning(Loc,
      S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }
};
}
}
}
1413 
1414 //===----------------------------------------------------------------------===//
1415 // AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
1416 //  warnings on a function, method, or block.
1417 //===----------------------------------------------------------------------===//
1418 
Policy()1419 clang::sema::AnalysisBasedWarnings::Policy::Policy() {
1420   enableCheckFallThrough = 1;
1421   enableCheckUnreachable = 0;
1422   enableThreadSafetyAnalysis = 0;
1423 }
1424 
AnalysisBasedWarnings(Sema & s)1425 clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
1426   : S(s),
1427     NumFunctionsAnalyzed(0),
1428     NumFunctionsWithBadCFGs(0),
1429     NumCFGBlocks(0),
1430     MaxCFGBlocksPerFunction(0),
1431     NumUninitAnalysisFunctions(0),
1432     NumUninitAnalysisVariables(0),
1433     MaxUninitAnalysisVariablesPerFunction(0),
1434     NumUninitAnalysisBlockVisits(0),
1435     MaxUninitAnalysisBlockVisitsPerFunction(0) {
1436   DiagnosticsEngine &D = S.getDiagnostics();
1437   DefaultPolicy.enableCheckUnreachable = (unsigned)
1438     (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
1439         DiagnosticsEngine::Ignored);
1440   DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
1441     (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
1442      DiagnosticsEngine::Ignored);
1443 
1444 }
1445 
flushDiagnostics(Sema & S,sema::FunctionScopeInfo * fscope)1446 static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
1447   for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
1448        i = fscope->PossiblyUnreachableDiags.begin(),
1449        e = fscope->PossiblyUnreachableDiags.end();
1450        i != e; ++i) {
1451     const sema::PossiblyUnreachableDiag &D = *i;
1452     S.Diag(D.Loc, D.PD);
1453   }
1454 }
1455 
1456 void clang::sema::
IssueWarnings(sema::AnalysisBasedWarnings::Policy P,sema::FunctionScopeInfo * fscope,const Decl * D,const BlockExpr * blkExpr)1457 AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
1458                                      sema::FunctionScopeInfo *fscope,
1459                                      const Decl *D, const BlockExpr *blkExpr) {
1460 
1461   // We avoid doing analysis-based warnings when there are errors for
1462   // two reasons:
1463   // (1) The CFGs often can't be constructed (if the body is invalid), so
1464   //     don't bother trying.
1465   // (2) The code already has problems; running the analysis just takes more
1466   //     time.
1467   DiagnosticsEngine &Diags = S.getDiagnostics();
1468 
1469   // Do not do any analysis for declarations in system headers if we are
1470   // going to just ignore them.
1471   if (Diags.getSuppressSystemWarnings() &&
1472       S.SourceMgr.isInSystemHeader(D->getLocation()))
1473     return;
1474 
1475   // For code in dependent contexts, we'll do this at instantiation time.
1476   if (cast<DeclContext>(D)->isDependentContext())
1477     return;
1478 
1479   if (Diags.hasUncompilableErrorOccurred() || Diags.hasFatalErrorOccurred()) {
1480     // Flush out any possibly unreachable diagnostics.
1481     flushDiagnostics(S, fscope);
1482     return;
1483   }
1484 
1485   const Stmt *Body = D->getBody();
1486   assert(Body);
1487 
1488   AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D);
1489 
1490   // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
1491   // explosion for destrutors that can result and the compile time hit.
1492   AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
1493   AC.getCFGBuildOptions().AddEHEdges = false;
1494   AC.getCFGBuildOptions().AddInitializers = true;
1495   AC.getCFGBuildOptions().AddImplicitDtors = true;
1496   AC.getCFGBuildOptions().AddTemporaryDtors = true;
1497 
1498   // Force that certain expressions appear as CFGElements in the CFG.  This
1499   // is used to speed up various analyses.
1500   // FIXME: This isn't the right factoring.  This is here for initial
1501   // prototyping, but we need a way for analyses to say what expressions they
1502   // expect to always be CFGElements and then fill in the BuildOptions
1503   // appropriately.  This is essentially a layering violation.
1504   if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis) {
1505     // Unreachable code analysis and thread safety require a linearized CFG.
1506     AC.getCFGBuildOptions().setAllAlwaysAdd();
1507   }
1508   else {
1509     AC.getCFGBuildOptions()
1510       .setAlwaysAdd(Stmt::BinaryOperatorClass)
1511       .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
1512       .setAlwaysAdd(Stmt::BlockExprClass)
1513       .setAlwaysAdd(Stmt::CStyleCastExprClass)
1514       .setAlwaysAdd(Stmt::DeclRefExprClass)
1515       .setAlwaysAdd(Stmt::ImplicitCastExprClass)
1516       .setAlwaysAdd(Stmt::UnaryOperatorClass)
1517       .setAlwaysAdd(Stmt::AttributedStmtClass);
1518   }
1519 
1520   // Construct the analysis context with the specified CFG build options.
1521 
1522   // Emit delayed diagnostics.
1523   if (!fscope->PossiblyUnreachableDiags.empty()) {
1524     bool analyzed = false;
1525 
1526     // Register the expressions with the CFGBuilder.
1527     for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
1528          i = fscope->PossiblyUnreachableDiags.begin(),
1529          e = fscope->PossiblyUnreachableDiags.end();
1530          i != e; ++i) {
1531       if (const Stmt *stmt = i->stmt)
1532         AC.registerForcedBlockExpression(stmt);
1533     }
1534 
1535     if (AC.getCFG()) {
1536       analyzed = true;
1537       for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
1538             i = fscope->PossiblyUnreachableDiags.begin(),
1539             e = fscope->PossiblyUnreachableDiags.end();
1540             i != e; ++i)
1541       {
1542         const sema::PossiblyUnreachableDiag &D = *i;
1543         bool processed = false;
1544         if (const Stmt *stmt = i->stmt) {
1545           const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
1546           CFGReverseBlockReachabilityAnalysis *cra =
1547               AC.getCFGReachablityAnalysis();
1548           // FIXME: We should be able to assert that block is non-null, but
1549           // the CFG analysis can skip potentially-evaluated expressions in
1550           // edge cases; see test/Sema/vla-2.c.
1551           if (block && cra) {
1552             // Can this block be reached from the entrance?
1553             if (cra->isReachable(&AC.getCFG()->getEntry(), block))
1554               S.Diag(D.Loc, D.PD);
1555             processed = true;
1556           }
1557         }
1558         if (!processed) {
1559           // Emit the warning anyway if we cannot map to a basic block.
1560           S.Diag(D.Loc, D.PD);
1561         }
1562       }
1563     }
1564 
1565     if (!analyzed)
1566       flushDiagnostics(S, fscope);
1567   }
1568 
1569 
1570   // Warning: check missing 'return'
1571   if (P.enableCheckFallThrough) {
1572     const CheckFallThroughDiagnostics &CD =
1573       (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
1574        : (isa<CXXMethodDecl>(D) &&
1575           cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
1576           cast<CXXMethodDecl>(D)->getParent()->isLambda())
1577             ? CheckFallThroughDiagnostics::MakeForLambda()
1578             : CheckFallThroughDiagnostics::MakeForFunction(D));
1579     CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
1580   }
1581 
1582   // Warning: check for unreachable code
1583   if (P.enableCheckUnreachable) {
1584     // Only check for unreachable code on non-template instantiations.
1585     // Different template instantiations can effectively change the control-flow
1586     // and it is very difficult to prove that a snippet of code in a template
1587     // is unreachable for all instantiations.
1588     bool isTemplateInstantiation = false;
1589     if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
1590       isTemplateInstantiation = Function->isTemplateInstantiation();
1591     if (!isTemplateInstantiation)
1592       CheckUnreachable(S, AC);
1593   }
1594 
1595   // Check for thread safety violations
1596   if (P.enableThreadSafetyAnalysis) {
1597     SourceLocation FL = AC.getDecl()->getLocation();
1598     SourceLocation FEL = AC.getDecl()->getLocEnd();
1599     thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL);
1600     if (Diags.getDiagnosticLevel(diag::warn_thread_safety_beta,D->getLocStart())
1601         != DiagnosticsEngine::Ignored)
1602       Reporter.setIssueBetaWarnings(true);
1603 
1604     thread_safety::runThreadSafetyAnalysis(AC, Reporter);
1605     Reporter.emitDiagnostics();
1606   }
1607 
1608   if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
1609       != DiagnosticsEngine::Ignored ||
1610       Diags.getDiagnosticLevel(diag::warn_sometimes_uninit_var,D->getLocStart())
1611       != DiagnosticsEngine::Ignored ||
1612       Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
1613       != DiagnosticsEngine::Ignored) {
1614     if (CFG *cfg = AC.getCFG()) {
1615       UninitValsDiagReporter reporter(S);
1616       UninitVariablesAnalysisStats stats;
1617       std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
1618       runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
1619                                         reporter, stats);
1620 
1621       if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
1622         ++NumUninitAnalysisFunctions;
1623         NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
1624         NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
1625         MaxUninitAnalysisVariablesPerFunction =
1626             std::max(MaxUninitAnalysisVariablesPerFunction,
1627                      stats.NumVariablesAnalyzed);
1628         MaxUninitAnalysisBlockVisitsPerFunction =
1629             std::max(MaxUninitAnalysisBlockVisitsPerFunction,
1630                      stats.NumBlockVisits);
1631       }
1632     }
1633   }
1634 
1635   bool FallThroughDiagFull =
1636       Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough,
1637                                D->getLocStart()) != DiagnosticsEngine::Ignored;
1638   bool FallThroughDiagPerFunction =
1639       Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough_per_function,
1640                                D->getLocStart()) != DiagnosticsEngine::Ignored;
1641   if (FallThroughDiagFull || FallThroughDiagPerFunction) {
1642     DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
1643   }
1644 
1645   if (S.getLangOpts().ObjCARCWeak &&
1646       Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
1647                                D->getLocStart()) != DiagnosticsEngine::Ignored)
1648     diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());
1649 
1650   // Collect statistics about the CFG if it was built.
1651   if (S.CollectStats && AC.isCFGBuilt()) {
1652     ++NumFunctionsAnalyzed;
1653     if (CFG *cfg = AC.getCFG()) {
1654       // If we successfully built a CFG for this context, record some more
1655       // detail information about it.
1656       NumCFGBlocks += cfg->getNumBlockIDs();
1657       MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
1658                                          cfg->getNumBlockIDs());
1659     } else {
1660       ++NumFunctionsWithBadCFGs;
1661     }
1662   }
1663 }
1664 
PrintStats() const1665 void clang::sema::AnalysisBasedWarnings::PrintStats() const {
1666   llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
1667 
1668   unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
1669   unsigned AvgCFGBlocksPerFunction =
1670       !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
1671   llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
1672                << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
1673                << "  " << NumCFGBlocks << " CFG blocks built.\n"
1674                << "  " << AvgCFGBlocksPerFunction
1675                << " average CFG blocks per function.\n"
1676                << "  " << MaxCFGBlocksPerFunction
1677                << " max CFG blocks per function.\n";
1678 
1679   unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
1680       : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
1681   unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
1682       : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
1683   llvm::errs() << NumUninitAnalysisFunctions
1684                << " functions analyzed for uninitialiazed variables\n"
1685                << "  " << NumUninitAnalysisVariables << " variables analyzed.\n"
1686                << "  " << AvgUninitVariablesPerFunction
1687                << " average variables per function.\n"
1688                << "  " << MaxUninitAnalysisVariablesPerFunction
1689                << " max variables per function.\n"
1690                << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
1691                << "  " << AvgUninitBlockVisitsPerFunction
1692                << " average block visits per function.\n"
1693                << "  " << MaxUninitAnalysisBlockVisitsPerFunction
1694                << " max block visits per function.\n";
1695 }
1696