//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//

18 #include "clang/AST/Attr.h"
19 #include "clang/AST/DeclCXX.h"
20 #include "clang/AST/ExprCXX.h"
21 #include "clang/AST/StmtCXX.h"
22 #include "clang/AST/StmtVisitor.h"
23 #include "clang/Analysis/Analyses/PostOrderCFGView.h"
24 #include "clang/Analysis/Analyses/ThreadSafety.h"
25 #include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
26 #include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
27 #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
28 #include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
29 #include "clang/Analysis/AnalysisContext.h"
30 #include "clang/Analysis/CFG.h"
31 #include "clang/Analysis/CFGStmtMap.h"
32 #include "clang/Basic/OperatorKinds.h"
33 #include "clang/Basic/SourceLocation.h"
34 #include "clang/Basic/SourceManager.h"
35 #include "llvm/ADT/BitVector.h"
36 #include "llvm/ADT/FoldingSet.h"
37 #include "llvm/ADT/ImmutableMap.h"
38 #include "llvm/ADT/PostOrderIterator.h"
39 #include "llvm/ADT/SmallVector.h"
40 #include "llvm/ADT/StringRef.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include <algorithm>
43 #include <utility>
44 #include <vector>
45 
46 using namespace clang;
47 using namespace thread_safety;
48 
49 // Key method definition
~ThreadSafetyHandler()50 ThreadSafetyHandler::~ThreadSafetyHandler() {}
51 
52 namespace {
53 
54 /// SExpr implements a simple expression language that is used to store,
55 /// compare, and pretty-print C++ expressions.  Unlike a clang Expr, a SExpr
56 /// does not capture surface syntax, and it does not distinguish between
57 /// C++ concepts, like pointers and references, that have no real semantic
58 /// differences.  This simplicity allows SExprs to be meaningfully compared,
59 /// e.g.
60 ///        (x)          =  x
61 ///        (*this).foo  =  this->foo
62 ///        *&a          =  a
63 ///
64 /// Thread-safety analysis works by comparing lock expressions.  Within the
65 /// body of a function, an expression such as "x->foo->bar.mu" will resolve to
66 /// a particular mutex object at run-time.  Subsequent occurrences of the same
67 /// expression (where "same" means syntactic equality) will refer to the same
68 /// run-time object if three conditions hold:
69 /// (1) Local variables in the expression, such as "x" have not changed.
70 /// (2) Values on the heap that affect the expression have not changed.
71 /// (3) The expression involves only pure function calls.
72 ///
73 /// The current implementation assumes, but does not verify, that multiple uses
74 /// of the same lock expression satisfies these criteria.
75 class SExpr {
76 private:
77   enum ExprOp {
78     EOP_Nop,       ///< No-op
79     EOP_Wildcard,  ///< Matches anything.
80     EOP_Universal, ///< Universal lock.
81     EOP_This,      ///< This keyword.
82     EOP_NVar,      ///< Named variable.
83     EOP_LVar,      ///< Local variable.
84     EOP_Dot,       ///< Field access
85     EOP_Call,      ///< Function call
86     EOP_MCall,     ///< Method call
87     EOP_Index,     ///< Array index
88     EOP_Unary,     ///< Unary operation
89     EOP_Binary,    ///< Binary operation
90     EOP_Unknown    ///< Catchall for everything else
91   };
92 
93 
94   class SExprNode {
95    private:
96     unsigned char  Op;     ///< Opcode of the root node
97     unsigned char  Flags;  ///< Additional opcode-specific data
98     unsigned short Sz;     ///< Number of child nodes
99     const void*    Data;   ///< Additional opcode-specific data
100 
101    public:
SExprNode(ExprOp O,unsigned F,const void * D)102     SExprNode(ExprOp O, unsigned F, const void* D)
103       : Op(static_cast<unsigned char>(O)),
104         Flags(static_cast<unsigned char>(F)), Sz(1), Data(D)
105     { }
106 
size() const107     unsigned size() const        { return Sz; }
setSize(unsigned S)108     void     setSize(unsigned S) { Sz = S;    }
109 
kind() const110     ExprOp   kind() const { return static_cast<ExprOp>(Op); }
111 
getNamedDecl() const112     const NamedDecl* getNamedDecl() const {
113       assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot);
114       return reinterpret_cast<const NamedDecl*>(Data);
115     }
116 
getFunctionDecl() const117     const NamedDecl* getFunctionDecl() const {
118       assert(Op == EOP_Call || Op == EOP_MCall);
119       return reinterpret_cast<const NamedDecl*>(Data);
120     }
121 
isArrow() const122     bool isArrow() const { return Op == EOP_Dot && Flags == 1; }
setArrow(bool A)123     void setArrow(bool A) { Flags = A ? 1 : 0; }
124 
arity() const125     unsigned arity() const {
126       switch (Op) {
127         case EOP_Nop:       return 0;
128         case EOP_Wildcard:  return 0;
129         case EOP_Universal: return 0;
130         case EOP_NVar:      return 0;
131         case EOP_LVar:      return 0;
132         case EOP_This:      return 0;
133         case EOP_Dot:       return 1;
134         case EOP_Call:      return Flags+1;  // First arg is function.
135         case EOP_MCall:     return Flags+1;  // First arg is implicit obj.
136         case EOP_Index:     return 2;
137         case EOP_Unary:     return 1;
138         case EOP_Binary:    return 2;
139         case EOP_Unknown:   return Flags;
140       }
141       return 0;
142     }
143 
operator ==(const SExprNode & Other) const144     bool operator==(const SExprNode& Other) const {
145       // Ignore flags and size -- they don't matter.
146       return (Op == Other.Op &&
147               Data == Other.Data);
148     }
149 
operator !=(const SExprNode & Other) const150     bool operator!=(const SExprNode& Other) const {
151       return !(*this == Other);
152     }
153 
matches(const SExprNode & Other) const154     bool matches(const SExprNode& Other) const {
155       return (*this == Other) ||
156              (Op == EOP_Wildcard) ||
157              (Other.Op == EOP_Wildcard);
158     }
159   };
160 
161 
162   /// \brief Encapsulates the lexical context of a function call.  The lexical
163   /// context includes the arguments to the call, including the implicit object
164   /// argument.  When an attribute containing a mutex expression is attached to
165   /// a method, the expression may refer to formal parameters of the method.
166   /// Actual arguments must be substituted for formal parameters to derive
167   /// the appropriate mutex expression in the lexical context where the function
168   /// is called.  PrevCtx holds the context in which the arguments themselves
169   /// should be evaluated; multiple calling contexts can be chained together
170   /// by the lock_returned attribute.
171   struct CallingContext {
172     const NamedDecl*   AttrDecl;   // The decl to which the attribute is attached.
173     const Expr*        SelfArg;    // Implicit object argument -- e.g. 'this'
174     bool               SelfArrow;  // is Self referred to with -> or .?
175     unsigned           NumArgs;    // Number of funArgs
176     const Expr* const* FunArgs;    // Function arguments
177     CallingContext*    PrevCtx;    // The previous context; or 0 if none.
178 
CallingContext__anon48488dd70111::SExpr::CallingContext179     CallingContext(const NamedDecl *D)
180         : AttrDecl(D), SelfArg(nullptr), SelfArrow(false), NumArgs(0),
181           FunArgs(nullptr), PrevCtx(nullptr) {}
182   };
183 
184   typedef SmallVector<SExprNode, 4> NodeVector;
185 
186 private:
187   // A SExpr is a list of SExprNodes in prefix order.  The Size field allows
188   // the list to be traversed as a tree.
189   NodeVector NodeVec;
190 
191 private:
make(ExprOp O,unsigned F=0,const void * D=nullptr)192   unsigned make(ExprOp O, unsigned F = 0, const void *D = nullptr) {
193     NodeVec.push_back(SExprNode(O, F, D));
194     return NodeVec.size() - 1;
195   }
196 
makeNop()197   unsigned makeNop() {
198     return make(EOP_Nop);
199   }
200 
makeWildcard()201   unsigned makeWildcard() {
202     return make(EOP_Wildcard);
203   }
204 
makeUniversal()205   unsigned makeUniversal() {
206     return make(EOP_Universal);
207   }
208 
makeNamedVar(const NamedDecl * D)209   unsigned makeNamedVar(const NamedDecl *D) {
210     return make(EOP_NVar, 0, D);
211   }
212 
makeLocalVar(const NamedDecl * D)213   unsigned makeLocalVar(const NamedDecl *D) {
214     return make(EOP_LVar, 0, D);
215   }
216 
makeThis()217   unsigned makeThis() {
218     return make(EOP_This);
219   }
220 
makeDot(const NamedDecl * D,bool Arrow)221   unsigned makeDot(const NamedDecl *D, bool Arrow) {
222     return make(EOP_Dot, Arrow ? 1 : 0, D);
223   }
224 
makeCall(unsigned NumArgs,const NamedDecl * D)225   unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
226     return make(EOP_Call, NumArgs, D);
227   }
228 
229   // Grab the very first declaration of virtual method D
getFirstVirtualDecl(const CXXMethodDecl * D)230   const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) {
231     while (true) {
232       D = D->getCanonicalDecl();
233       CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
234                                      E = D->end_overridden_methods();
235       if (I == E)
236         return D;  // Method does not override anything
237       D = *I;      // FIXME: this does not work with multiple inheritance.
238     }
239     return nullptr;
240   }
241 
makeMCall(unsigned NumArgs,const CXXMethodDecl * D)242   unsigned makeMCall(unsigned NumArgs, const CXXMethodDecl *D) {
243     return make(EOP_MCall, NumArgs, getFirstVirtualDecl(D));
244   }
245 
makeIndex()246   unsigned makeIndex() {
247     return make(EOP_Index);
248   }
249 
makeUnary()250   unsigned makeUnary() {
251     return make(EOP_Unary);
252   }
253 
makeBinary()254   unsigned makeBinary() {
255     return make(EOP_Binary);
256   }
257 
makeUnknown(unsigned Arity)258   unsigned makeUnknown(unsigned Arity) {
259     return make(EOP_Unknown, Arity);
260   }
261 
isCalleeArrow(const Expr * E)262   inline bool isCalleeArrow(const Expr *E) {
263     const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
264     return ME ? ME->isArrow() : false;
265   }
266 
267   /// Build an SExpr from the given C++ expression.
268   /// Recursive function that terminates on DeclRefExpr.
269   /// Note: this function merely creates a SExpr; it does not check to
270   /// ensure that the original expression is a valid mutex expression.
271   ///
272   /// NDeref returns the number of Derefence and AddressOf operations
273   /// preceding the Expr; this is used to decide whether to pretty-print
274   /// SExprs with . or ->.
buildSExpr(const Expr * Exp,CallingContext * CallCtx,int * NDeref=nullptr)275   unsigned buildSExpr(const Expr *Exp, CallingContext *CallCtx,
276                       int *NDeref = nullptr) {
277     if (!Exp)
278       return 0;
279 
280     if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
281       const NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
282       const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
283       if (PV) {
284         const FunctionDecl *FD =
285           cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
286         unsigned i = PV->getFunctionScopeIndex();
287 
288         if (CallCtx && CallCtx->FunArgs &&
289             FD == CallCtx->AttrDecl->getCanonicalDecl()) {
290           // Substitute call arguments for references to function parameters
291           assert(i < CallCtx->NumArgs);
292           return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref);
293         }
294         // Map the param back to the param of the original function declaration.
295         makeNamedVar(FD->getParamDecl(i));
296         return 1;
297       }
298       // Not a function parameter -- just store the reference.
299       makeNamedVar(ND);
300       return 1;
301     } else if (isa<CXXThisExpr>(Exp)) {
302       // Substitute parent for 'this'
303       if (CallCtx && CallCtx->SelfArg) {
304         if (!CallCtx->SelfArrow && NDeref)
305           // 'this' is a pointer, but self is not, so need to take address.
306           --(*NDeref);
307         return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref);
308       }
309       else {
310         makeThis();
311         return 1;
312       }
313     } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
314       const NamedDecl *ND = ME->getMemberDecl();
315       int ImplicitDeref = ME->isArrow() ? 1 : 0;
316       unsigned Root = makeDot(ND, false);
317       unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref);
318       NodeVec[Root].setArrow(ImplicitDeref > 0);
319       NodeVec[Root].setSize(Sz + 1);
320       return Sz + 1;
321     } else if (const CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
322       // When calling a function with a lock_returned attribute, replace
323       // the function call with the expression in lock_returned.
324       const CXXMethodDecl *MD = CMCE->getMethodDecl()->getMostRecentDecl();
325       if (LockReturnedAttr* At = MD->getAttr<LockReturnedAttr>()) {
326         CallingContext LRCallCtx(CMCE->getMethodDecl());
327         LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
328         LRCallCtx.SelfArrow = isCalleeArrow(CMCE->getCallee());
329         LRCallCtx.NumArgs = CMCE->getNumArgs();
330         LRCallCtx.FunArgs = CMCE->getArgs();
331         LRCallCtx.PrevCtx = CallCtx;
332         return buildSExpr(At->getArg(), &LRCallCtx);
333       }
334       // Hack to treat smart pointers and iterators as pointers;
335       // ignore any method named get().
336       if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
337           CMCE->getNumArgs() == 0) {
338         if (NDeref && isCalleeArrow(CMCE->getCallee()))
339           ++(*NDeref);
340         return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
341       }
342       unsigned NumCallArgs = CMCE->getNumArgs();
343       unsigned Root = makeMCall(NumCallArgs, CMCE->getMethodDecl());
344       unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
345       const Expr* const* CallArgs = CMCE->getArgs();
346       for (unsigned i = 0; i < NumCallArgs; ++i) {
347         Sz += buildSExpr(CallArgs[i], CallCtx);
348       }
349       NodeVec[Root].setSize(Sz + 1);
350       return Sz + 1;
351     } else if (const CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
352       const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl();
353       if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
354         CallingContext LRCallCtx(CE->getDirectCallee());
355         LRCallCtx.NumArgs = CE->getNumArgs();
356         LRCallCtx.FunArgs = CE->getArgs();
357         LRCallCtx.PrevCtx = CallCtx;
358         return buildSExpr(At->getArg(), &LRCallCtx);
359       }
360       // Treat smart pointers and iterators as pointers;
361       // ignore the * and -> operators.
362       if (const CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
363         OverloadedOperatorKind k = OE->getOperator();
364         if (k == OO_Star) {
365           if (NDeref) ++(*NDeref);
366           return buildSExpr(OE->getArg(0), CallCtx, NDeref);
367         }
368         else if (k == OO_Arrow) {
369           return buildSExpr(OE->getArg(0), CallCtx, NDeref);
370         }
371       }
372       unsigned NumCallArgs = CE->getNumArgs();
373       unsigned Root = makeCall(NumCallArgs, nullptr);
374       unsigned Sz = buildSExpr(CE->getCallee(), CallCtx);
375       const Expr* const* CallArgs = CE->getArgs();
376       for (unsigned i = 0; i < NumCallArgs; ++i) {
377         Sz += buildSExpr(CallArgs[i], CallCtx);
378       }
379       NodeVec[Root].setSize(Sz+1);
380       return Sz+1;
381     } else if (const BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
382       unsigned Root = makeBinary();
383       unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx);
384       Sz += buildSExpr(BOE->getRHS(), CallCtx);
385       NodeVec[Root].setSize(Sz);
386       return Sz;
387     } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
388       // Ignore & and * operators -- they're no-ops.
389       // However, we try to figure out whether the expression is a pointer,
390       // so we can use . and -> appropriately in error messages.
391       if (UOE->getOpcode() == UO_Deref) {
392         if (NDeref) ++(*NDeref);
393         return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
394       }
395       if (UOE->getOpcode() == UO_AddrOf) {
396         if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) {
397           if (DRE->getDecl()->isCXXInstanceMember()) {
398             // This is a pointer-to-member expression, e.g. &MyClass::mu_.
399             // We interpret this syntax specially, as a wildcard.
400             unsigned Root = makeDot(DRE->getDecl(), false);
401             makeWildcard();
402             NodeVec[Root].setSize(2);
403             return 2;
404           }
405         }
406         if (NDeref) --(*NDeref);
407         return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
408       }
409       unsigned Root = makeUnary();
410       unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx);
411       NodeVec[Root].setSize(Sz);
412       return Sz;
413     } else if (const ArraySubscriptExpr *ASE =
414                dyn_cast<ArraySubscriptExpr>(Exp)) {
415       unsigned Root = makeIndex();
416       unsigned Sz = buildSExpr(ASE->getBase(), CallCtx);
417       Sz += buildSExpr(ASE->getIdx(), CallCtx);
418       NodeVec[Root].setSize(Sz);
419       return Sz;
420     } else if (const AbstractConditionalOperator *CE =
421                dyn_cast<AbstractConditionalOperator>(Exp)) {
422       unsigned Root = makeUnknown(3);
423       unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
424       Sz += buildSExpr(CE->getTrueExpr(), CallCtx);
425       Sz += buildSExpr(CE->getFalseExpr(), CallCtx);
426       NodeVec[Root].setSize(Sz);
427       return Sz;
428     } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
429       unsigned Root = makeUnknown(3);
430       unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
431       Sz += buildSExpr(CE->getLHS(), CallCtx);
432       Sz += buildSExpr(CE->getRHS(), CallCtx);
433       NodeVec[Root].setSize(Sz);
434       return Sz;
435     } else if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
436       return buildSExpr(CE->getSubExpr(), CallCtx, NDeref);
437     } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
438       return buildSExpr(PE->getSubExpr(), CallCtx, NDeref);
439     } else if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
440       return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref);
441     } else if (const CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
442       return buildSExpr(E->getSubExpr(), CallCtx, NDeref);
443     } else if (isa<CharacterLiteral>(Exp) ||
444                isa<CXXNullPtrLiteralExpr>(Exp) ||
445                isa<GNUNullExpr>(Exp) ||
446                isa<CXXBoolLiteralExpr>(Exp) ||
447                isa<FloatingLiteral>(Exp) ||
448                isa<ImaginaryLiteral>(Exp) ||
449                isa<IntegerLiteral>(Exp) ||
450                isa<StringLiteral>(Exp) ||
451                isa<ObjCStringLiteral>(Exp)) {
452       makeNop();
453       return 1;  // FIXME: Ignore literals for now
454     } else {
455       makeNop();
456       return 1;  // Ignore.  FIXME: mark as invalid expression?
457     }
458   }
459 
460   /// \brief Construct a SExpr from an expression.
461   /// \param MutexExp The original mutex expression within an attribute
462   /// \param DeclExp An expression involving the Decl on which the attribute
463   ///        occurs.
464   /// \param D  The declaration to which the lock/unlock attribute is attached.
buildSExprFromExpr(const Expr * MutexExp,const Expr * DeclExp,const NamedDecl * D,VarDecl * SelfDecl=nullptr)465   void buildSExprFromExpr(const Expr *MutexExp, const Expr *DeclExp,
466                           const NamedDecl *D, VarDecl *SelfDecl = nullptr) {
467     CallingContext CallCtx(D);
468 
469     if (MutexExp) {
470       if (const StringLiteral* SLit = dyn_cast<StringLiteral>(MutexExp)) {
471         if (SLit->getString() == StringRef("*"))
472           // The "*" expr is a universal lock, which essentially turns off
473           // checks until it is removed from the lockset.
474           makeUniversal();
475         else
476           // Ignore other string literals for now.
477           makeNop();
478         return;
479       }
480     }
481 
482     // If we are processing a raw attribute expression, with no substitutions.
483     if (!DeclExp) {
484       buildSExpr(MutexExp, nullptr);
485       return;
486     }
487 
488     // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
489     // for formal parameters when we call buildMutexID later.
490     if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
491       CallCtx.SelfArg   = ME->getBase();
492       CallCtx.SelfArrow = ME->isArrow();
493     } else if (const CXXMemberCallExpr *CE =
494                dyn_cast<CXXMemberCallExpr>(DeclExp)) {
495       CallCtx.SelfArg   = CE->getImplicitObjectArgument();
496       CallCtx.SelfArrow = isCalleeArrow(CE->getCallee());
497       CallCtx.NumArgs   = CE->getNumArgs();
498       CallCtx.FunArgs   = CE->getArgs();
499     } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
500       CallCtx.NumArgs = CE->getNumArgs();
501       CallCtx.FunArgs = CE->getArgs();
502     } else if (const CXXConstructExpr *CE =
503                dyn_cast<CXXConstructExpr>(DeclExp)) {
504       CallCtx.SelfArg = nullptr;  // Will be set below
505       CallCtx.NumArgs = CE->getNumArgs();
506       CallCtx.FunArgs = CE->getArgs();
507     } else if (D && isa<CXXDestructorDecl>(D)) {
508       // There's no such thing as a "destructor call" in the AST.
509       CallCtx.SelfArg = DeclExp;
510     }
511 
512     // Hack to handle constructors, where self cannot be recovered from
513     // the expression.
514     if (SelfDecl && !CallCtx.SelfArg) {
515       DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
516                           SelfDecl->getLocation());
517       CallCtx.SelfArg = &SelfDRE;
518 
519       // If the attribute has no arguments, then assume the argument is "this".
520       if (!MutexExp)
521         buildSExpr(CallCtx.SelfArg, nullptr);
522       else  // For most attributes.
523         buildSExpr(MutexExp, &CallCtx);
524       return;
525     }
526 
527     // If the attribute has no arguments, then assume the argument is "this".
528     if (!MutexExp)
529       buildSExpr(CallCtx.SelfArg, nullptr);
530     else  // For most attributes.
531       buildSExpr(MutexExp, &CallCtx);
532   }
533 
534   /// \brief Get index of next sibling of node i.
getNextSibling(unsigned i) const535   unsigned getNextSibling(unsigned i) const {
536     return i + NodeVec[i].size();
537   }
538 
539 public:
SExpr(clang::Decl::EmptyShell e)540   explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); }
541 
542   /// \param MutexExp The original mutex expression within an attribute
543   /// \param DeclExp An expression involving the Decl on which the attribute
544   ///        occurs.
545   /// \param D  The declaration to which the lock/unlock attribute is attached.
546   /// Caller must check isValid() after construction.
SExpr(const Expr * MutexExp,const Expr * DeclExp,const NamedDecl * D,VarDecl * SelfDecl=nullptr)547   SExpr(const Expr *MutexExp, const Expr *DeclExp, const NamedDecl *D,
548         VarDecl *SelfDecl = nullptr) {
549     buildSExprFromExpr(MutexExp, DeclExp, D, SelfDecl);
550   }
551 
552   /// Return true if this is a valid decl sequence.
553   /// Caller must call this by hand after construction to handle errors.
isValid() const554   bool isValid() const {
555     return !NodeVec.empty();
556   }
557 
shouldIgnore() const558   bool shouldIgnore() const {
559     // Nop is a mutex that we have decided to deliberately ignore.
560     assert(NodeVec.size() > 0 && "Invalid Mutex");
561     return NodeVec[0].kind() == EOP_Nop;
562   }
563 
isUniversal() const564   bool isUniversal() const {
565     assert(NodeVec.size() > 0 && "Invalid Mutex");
566     return NodeVec[0].kind() == EOP_Universal;
567   }
568 
569   /// Issue a warning about an invalid lock expression
warnInvalidLock(ThreadSafetyHandler & Handler,const Expr * MutexExp,const Expr * DeclExp,const NamedDecl * D,StringRef Kind)570   static void warnInvalidLock(ThreadSafetyHandler &Handler,
571                               const Expr *MutexExp, const Expr *DeclExp,
572                               const NamedDecl *D, StringRef Kind) {
573     SourceLocation Loc;
574     if (DeclExp)
575       Loc = DeclExp->getExprLoc();
576 
577     // FIXME: add a note about the attribute location in MutexExp or D
578     if (Loc.isValid())
579       Handler.handleInvalidLockExp(Kind, Loc);
580   }
581 
operator ==(const SExpr & other) const582   bool operator==(const SExpr &other) const {
583     return NodeVec == other.NodeVec;
584   }
585 
operator !=(const SExpr & other) const586   bool operator!=(const SExpr &other) const {
587     return !(*this == other);
588   }
589 
matches(const SExpr & Other,unsigned i=0,unsigned j=0) const590   bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
591     if (NodeVec[i].matches(Other.NodeVec[j])) {
592       unsigned ni = NodeVec[i].arity();
593       unsigned nj = Other.NodeVec[j].arity();
594       unsigned n = (ni < nj) ? ni : nj;
595       bool Result = true;
596       unsigned ci = i+1;  // first child of i
597       unsigned cj = j+1;  // first child of j
598       for (unsigned k = 0; k < n;
599            ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) {
600         Result = Result && matches(Other, ci, cj);
601       }
602       return Result;
603     }
604     return false;
605   }
606 
607   // A partial match between a.mu and b.mu returns true a and b have the same
608   // type (and thus mu refers to the same mutex declaration), regardless of
609   // whether a and b are different objects or not.
partiallyMatches(const SExpr & Other) const610   bool partiallyMatches(const SExpr &Other) const {
611     if (NodeVec[0].kind() == EOP_Dot)
612       return NodeVec[0].matches(Other.NodeVec[0]);
613     return false;
614   }
615 
616   /// \brief Pretty print a lock expression for use in error messages.
toString(unsigned i=0) const617   std::string toString(unsigned i = 0) const {
618     assert(isValid());
619     if (i >= NodeVec.size())
620       return "";
621 
622     const SExprNode* N = &NodeVec[i];
623     switch (N->kind()) {
624       case EOP_Nop:
625         return "_";
626       case EOP_Wildcard:
627         return "(?)";
628       case EOP_Universal:
629         return "*";
630       case EOP_This:
631         return "this";
632       case EOP_NVar:
633       case EOP_LVar: {
634         return N->getNamedDecl()->getNameAsString();
635       }
636       case EOP_Dot: {
637         if (NodeVec[i+1].kind() == EOP_Wildcard) {
638           std::string S = "&";
639           S += N->getNamedDecl()->getQualifiedNameAsString();
640           return S;
641         }
642         std::string FieldName = N->getNamedDecl()->getNameAsString();
643         if (NodeVec[i+1].kind() == EOP_This)
644           return FieldName;
645 
646         std::string S = toString(i+1);
647         if (N->isArrow())
648           return S + "->" + FieldName;
649         else
650           return S + "." + FieldName;
651       }
652       case EOP_Call: {
653         std::string S = toString(i+1) + "(";
654         unsigned NumArgs = N->arity()-1;
655         unsigned ci = getNextSibling(i+1);
656         for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
657           S += toString(ci);
658           if (k+1 < NumArgs) S += ",";
659         }
660         S += ")";
661         return S;
662       }
663       case EOP_MCall: {
664         std::string S = "";
665         if (NodeVec[i+1].kind() != EOP_This)
666           S = toString(i+1) + ".";
667         if (const NamedDecl *D = N->getFunctionDecl())
668           S += D->getNameAsString() + "(";
669         else
670           S += "#(";
671         unsigned NumArgs = N->arity()-1;
672         unsigned ci = getNextSibling(i+1);
673         for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
674           S += toString(ci);
675           if (k+1 < NumArgs) S += ",";
676         }
677         S += ")";
678         return S;
679       }
680       case EOP_Index: {
681         std::string S1 = toString(i+1);
682         std::string S2 = toString(i+1 + NodeVec[i+1].size());
683         return S1 + "[" + S2 + "]";
684       }
685       case EOP_Unary: {
686         std::string S = toString(i+1);
687         return "#" + S;
688       }
689       case EOP_Binary: {
690         std::string S1 = toString(i+1);
691         std::string S2 = toString(i+1 + NodeVec[i+1].size());
692         return "(" + S1 + "#" + S2 + ")";
693       }
694       case EOP_Unknown: {
695         unsigned NumChildren = N->arity();
696         if (NumChildren == 0)
697           return "(...)";
698         std::string S = "(";
699         unsigned ci = i+1;
700         for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) {
701           S += toString(ci);
702           if (j+1 < NumChildren) S += "#";
703         }
704         S += ")";
705         return S;
706       }
707     }
708     return "";
709   }
710 };
711 
712 /// \brief A short list of SExprs
713 class MutexIDList : public SmallVector<SExpr, 3> {
714 public:
715   /// \brief Push M onto list, but discard duplicates.
push_back_nodup(const SExpr & M)716   void push_back_nodup(const SExpr& M) {
717     if (end() == std::find(begin(), end(), M))
718       push_back(M);
719   }
720 };
721 
722 /// \brief This is a helper class that stores info about the most recent
723 /// accquire of a Lock.
724 ///
725 /// The main body of the analysis maps MutexIDs to LockDatas.
726 struct LockData {
727   SourceLocation AcquireLoc;
728 
729   /// \brief LKind stores whether a lock is held shared or exclusively.
730   /// Note that this analysis does not currently support either re-entrant
731   /// locking or lock "upgrading" and "downgrading" between exclusive and
732   /// shared.
733   ///
734   /// FIXME: add support for re-entrant locking and lock up/downgrading
735   LockKind LKind;
736   bool     Asserted;           // for asserted locks
737   bool     Managed;            // for ScopedLockable objects
738   SExpr    UnderlyingMutex;    // for ScopedLockable objects
739 
LockData__anon48488dd70111::LockData740   LockData(SourceLocation AcquireLoc, LockKind LKind, bool M=false,
741            bool Asrt=false)
742     : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(Asrt), Managed(M),
743       UnderlyingMutex(Decl::EmptyShell())
744   {}
745 
LockData__anon48488dd70111::LockData746   LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu)
747     : AcquireLoc(AcquireLoc), LKind(LKind), Asserted(false), Managed(false),
748       UnderlyingMutex(Mu)
749   {}
750 
operator ==__anon48488dd70111::LockData751   bool operator==(const LockData &other) const {
752     return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
753   }
754 
operator !=__anon48488dd70111::LockData755   bool operator!=(const LockData &other) const {
756     return !(*this == other);
757   }
758 
Profile__anon48488dd70111::LockData759   void Profile(llvm::FoldingSetNodeID &ID) const {
760     ID.AddInteger(AcquireLoc.getRawEncoding());
761     ID.AddInteger(LKind);
762   }
763 
isAtLeast__anon48488dd70111::LockData764   bool isAtLeast(LockKind LK) {
765     return (LK == LK_Shared) || (LKind == LK_Exclusive);
766   }
767 };
768 
769 
/// \brief A FactEntry stores a single fact that is known at a particular point
/// in the program execution.  Currently, this is information regarding a lock
/// that is held at that point.
struct FactEntry {
  SExpr    MutID;   // The mutex expression this fact is about.
  LockData LDat;    // How (and where) that mutex is currently held.

  FactEntry(const SExpr& M, const LockData& L)
    : MutID(M), LDat(L)
  { }
};
781 
782 
783 typedef unsigned short FactID;
784 
785 /// \brief FactManager manages the memory for all facts that are created during
786 /// the analysis of a single routine.
787 class FactManager {
788 private:
789   std::vector<FactEntry> Facts;
790 
791 public:
newLock(const SExpr & M,const LockData & L)792   FactID newLock(const SExpr& M, const LockData& L) {
793     Facts.push_back(FactEntry(M,L));
794     return static_cast<unsigned short>(Facts.size() - 1);
795   }
796 
operator [](FactID F) const797   const FactEntry& operator[](FactID F) const { return Facts[F]; }
operator [](FactID F)798   FactEntry&       operator[](FactID F)       { return Facts[F]; }
799 };
800 
801 
802 /// \brief A FactSet is the set of facts that are known to be true at a
803 /// particular program point.  FactSets must be small, because they are
804 /// frequently copied, and are thus implemented as a set of indices into a
805 /// table maintained by a FactManager.  A typical FactSet only holds 1 or 2
806 /// locks, so we can get away with doing a linear search for lookup.  Note
807 /// that a hashtable or map is inappropriate in this case, because lookups
808 /// may involve partial pattern matches, rather than exact matches.
809 class FactSet {
810 private:
811   typedef SmallVector<FactID, 4> FactVec;
812 
813   FactVec FactIDs;
814 
815 public:
816   typedef FactVec::iterator       iterator;
817   typedef FactVec::const_iterator const_iterator;
818 
begin()819   iterator       begin()       { return FactIDs.begin(); }
begin() const820   const_iterator begin() const { return FactIDs.begin(); }
821 
end()822   iterator       end()       { return FactIDs.end(); }
end() const823   const_iterator end() const { return FactIDs.end(); }
824 
isEmpty() const825   bool isEmpty() const { return FactIDs.size() == 0; }
826 
addLock(FactManager & FM,const SExpr & M,const LockData & L)827   FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) {
828     FactID F = FM.newLock(M, L);
829     FactIDs.push_back(F);
830     return F;
831   }
832 
removeLock(FactManager & FM,const SExpr & M)833   bool removeLock(FactManager& FM, const SExpr& M) {
834     unsigned n = FactIDs.size();
835     if (n == 0)
836       return false;
837 
838     for (unsigned i = 0; i < n-1; ++i) {
839       if (FM[FactIDs[i]].MutID.matches(M)) {
840         FactIDs[i] = FactIDs[n-1];
841         FactIDs.pop_back();
842         return true;
843       }
844     }
845     if (FM[FactIDs[n-1]].MutID.matches(M)) {
846       FactIDs.pop_back();
847       return true;
848     }
849     return false;
850   }
851 
findLockIter(FactManager & FM,const SExpr & M)852   iterator findLockIter(FactManager &FM, const SExpr &M) {
853     return std::find_if(begin(), end(), [&](FactID ID) {
854       return FM[ID].MutID.matches(M);
855     });
856   }
857 
findLock(FactManager & FM,const SExpr & M) const858   LockData *findLock(FactManager &FM, const SExpr &M) const {
859     auto I = std::find_if(begin(), end(), [&](FactID ID) {
860       return FM[ID].MutID.matches(M);
861     });
862 
863     return I != end() ? &FM[*I].LDat : nullptr;
864   }
865 
findLockUniv(FactManager & FM,const SExpr & M) const866   LockData *findLockUniv(FactManager &FM, const SExpr &M) const {
867     auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
868       const SExpr &Expr = FM[ID].MutID;
869       return Expr.isUniversal() || Expr.matches(M);
870     });
871 
872     return I != end() ? &FM[*I].LDat : nullptr;
873   }
874 
findPartialMatch(FactManager & FM,const SExpr & M) const875   FactEntry *findPartialMatch(FactManager &FM, const SExpr &M) const {
876     auto I = std::find_if(begin(), end(), [&](FactID ID) {
877       return FM[ID].MutID.partiallyMatches(M);
878     });
879 
880     return I != end() ? &FM[*I] : nullptr;
881   }
882 };
883 
/// A Lockset maps each SExpr (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<SExpr, LockData> Lockset;
/// A LocalVarContext maps each local variable (NamedDecl) to the index of its
/// currently-visible definition (see LocalVariableMap).
typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;

class LocalVariableMap;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };
893 
894 /// CFGBlockInfo is a struct which contains all the information that is
895 /// maintained for each block in the CFG.  See LocalVariableMap for more
896 /// information about the contexts.
897 struct CFGBlockInfo {
898   FactSet EntrySet;             // Lockset held at entry to block
899   FactSet ExitSet;              // Lockset held at exit from block
900   LocalVarContext EntryContext; // Context held at entry to block
901   LocalVarContext ExitContext;  // Context held at exit from block
902   SourceLocation EntryLoc;      // Location of first statement in block
903   SourceLocation ExitLoc;       // Location of last statement in block.
904   unsigned EntryIndex;          // Used to replay contexts later
905   bool Reachable;               // Is this block reachable?
906 
getSet__anon48488dd70111::CFGBlockInfo907   const FactSet &getSet(CFGBlockSide Side) const {
908     return Side == CBS_Entry ? EntrySet : ExitSet;
909   }
getLocation__anon48488dd70111::CFGBlockInfo910   SourceLocation getLocation(CFGBlockSide Side) const {
911     return Side == CBS_Entry ? EntryLoc : ExitLoc;
912   }
913 
914 private:
CFGBlockInfo__anon48488dd70111::CFGBlockInfo915   CFGBlockInfo(LocalVarContext EmptyCtx)
916     : EntryContext(EmptyCtx), ExitContext(EmptyCtx), Reachable(false)
917   { }
918 
919 public:
920   static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
921 };
922 
923 
924 
925 // A LocalVariableMap maintains a map from local variables to their currently
926 // valid definitions.  It provides SSA-like functionality when traversing the
927 // CFG.  Like SSA, each definition or assignment to a variable is assigned a
928 // unique name (an integer), which acts as the SSA name for that definition.
929 // The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable declrefs
931 // with their SSA-names.  Instead, we compute a Context for each point in the
932 // code, which maps local variables to the appropriate SSA-name.  This map
933 // changes with each assignment.
934 //
935 // The map is computed in a single pass over the CFG.  Subsequent analyses can
936 // then query the map to find the appropriate Context for a statement, and use
937 // that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  typedef LocalVarContext Context;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    const NamedDecl *Dec;  // The original declaration for this variable.
    const Expr *Exp;       // The expression for this variable, OR
    unsigned Ref;          // Reference to another VarDefinition
    Context Ctx;           // The map with which Exp should be interpreted.

    // A definition with a null expression is a reference to another
    // definition (via Ref), rather than a direct expression.
    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
      : Dec(D), Exp(E), Ref(0), Ctx(C)
    { }

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
      : Dec(D), Exp(nullptr), Ref(R), Ctx(C)
    { }
  };

private:
  Context::Factory ContextFactory;            // Allocator for immutable maps.
  std::vector<VarDefinition> VarDefinitions;  // All definitions; index 0 = undefined.
  std::vector<unsigned> CtxIndices;           // Per-block starting context index.
  std::vector<std::pair<Stmt*, Context> > SavedContexts;  // Contexts for replay, keyed by stmt.

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    // Follow the chain of reference definitions until we reach one with an
    // actual expression; index 0 (undefined) terminates the walk.
    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  /// Print the name of definition i to llvm::errs(); 0 prints "Undefined".
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    // Skip index 0, the undefined-variable placeholder.
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  // Get the current context index.
  // Assumes saveContext has been called at least once; the unsigned
  // subtraction would wrap if SavedContexts were empty.
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S,C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    // Note: the definition stores the *old* context, in which Exp was written.
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);

  friend class VarMapBuilder;
};
1135 
1136 
// This has to be defined after LocalVariableMap, because it needs the
// (complete) definition of getEmptyContext().
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}
1141 
1142 
/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;           // The map being built (not owned).
  LocalVariableMap::Context Ctx;    // Context as of the last visited statement.

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
    : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(DeclStmt *S);
  void VisitBinaryOperator(BinaryOperator *BO);
};
1155 
1156 
1157 // Add new local variables to the variable map
VisitDeclStmt(DeclStmt * S)1158 void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
1159   bool modifiedCtx = false;
1160   DeclGroupRef DGrp = S->getDeclGroup();
1161   for (const auto *D : DGrp) {
1162     if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
1163       const Expr *E = VD->getInit();
1164 
1165       // Add local variables with trivial type to the variable map
1166       QualType T = VD->getType();
1167       if (T.isTrivialType(VD->getASTContext())) {
1168         Ctx = VMap->addDefinition(VD, E, Ctx);
1169         modifiedCtx = true;
1170       }
1171     }
1172   }
1173   if (modifiedCtx)
1174     VMap->saveContext(S, Ctx);
1175 }
1176 
1177 // Update local variable definitions in variable map
VisitBinaryOperator(BinaryOperator * BO)1178 void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
1179   if (!BO->isAssignmentOp())
1180     return;
1181 
1182   Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
1183 
1184   // Update the variable map and current context.
1185   if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
1186     ValueDecl *VDec = DRE->getDecl();
1187     if (Ctx.lookup(VDec)) {
1188       if (BO->getOpcode() == BO_Assign)
1189         Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
1190       else
1191         // FIXME -- handle compound assignment operators
1192         Ctx = VMap->clearDefinition(VDec, Ctx);
1193       VMap->saveContext(BO, Ctx);
1194     }
1195   }
1196 }
1197 
1198 
1199 // Computes the intersection of two contexts.  The intersection is the
1200 // set of variables which have the same definition in both contexts;
1201 // variables with different definitions are discarded.
1202 LocalVariableMap::Context
intersectContexts(Context C1,Context C2)1203 LocalVariableMap::intersectContexts(Context C1, Context C2) {
1204   Context Result = C1;
1205   for (const auto &P : C1) {
1206     const NamedDecl *Dec = P.first;
1207     const unsigned *i2 = C2.lookup(Dec);
1208     if (!i2)             // variable doesn't exist on second path
1209       Result = removeDefinition(Dec, Result);
1210     else if (*i2 != P.second)  // variable exists, but has different definition
1211       Result = clearDefinition(Dec, Result);
1212   }
1213   return Result;
1214 }
1215 
1216 // For every variable in C, create a new variable that refers to the
1217 // definition in C.  Return a new context that contains these new variables.
1218 // (We use this for a naive implementation of SSA on loop back-edges.)
createReferenceContext(Context C)1219 LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
1220   Context Result = getEmptyContext();
1221   for (const auto &P : C)
1222     Result = addReference(P.first, P.second, Result);
1223   return Result;
1224 }
1225 
1226 // This routine also takes the intersection of C1 and C2, but it does so by
1227 // altering the VarDefinitions.  C1 must be the result of an earlier call to
1228 // createReferenceContext.
intersectBackEdge(Context C1,Context C2)1229 void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
1230   for (const auto &P : C1) {
1231     unsigned i1 = P.second;
1232     VarDefinition *VDef = &VarDefinitions[i1];
1233     assert(VDef->isReference());
1234 
1235     const unsigned *i2 = C2.lookup(P.first);
1236     if (!i2 || (*i2 != i1))
1237       VDef->Ref = 0;    // Mark this variable as undefined
1238   }
1239 }
1240 
1241 
// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself.  At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm.  Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block.  We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass.  Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals.  (These correspond to places where SSA
// might have to insert a phi node.)  On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
//
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (const auto *CurrBlock : *SortedGraph) {
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block: the intersection of
    // the exit contexts of all already-visited predecessors.
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge (predecessor not yet visited),
      // skip it; back edges are handled after the block is processed.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        // First visited predecessor seeds the entry context.
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block, updating the context as
    // each assignment or declaration is encountered.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI->castAs<CFGStmt>();
          VMapBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd   = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}
1362 
1363 /// Find the appropriate source locations to use when producing diagnostics for
1364 /// each block in the CFG.
findBlockLocations(CFG * CFGraph,const PostOrderCFGView * SortedGraph,std::vector<CFGBlockInfo> & BlockInfo)1365 static void findBlockLocations(CFG *CFGraph,
1366                                const PostOrderCFGView *SortedGraph,
1367                                std::vector<CFGBlockInfo> &BlockInfo) {
1368   for (const auto *CurrBlock : *SortedGraph) {
1369     CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
1370 
1371     // Find the source location of the last statement in the block, if the
1372     // block is not empty.
1373     if (const Stmt *S = CurrBlock->getTerminator()) {
1374       CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
1375     } else {
1376       for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
1377            BE = CurrBlock->rend(); BI != BE; ++BI) {
1378         // FIXME: Handle other CFGElement kinds.
1379         if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
1380           CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
1381           break;
1382         }
1383       }
1384     }
1385 
1386     if (!CurrBlockInfo->ExitLoc.isInvalid()) {
1387       // This block contains at least one statement. Find the source location
1388       // of the first statement in the block.
1389       for (CFGBlock::const_iterator BI = CurrBlock->begin(),
1390            BE = CurrBlock->end(); BI != BE; ++BI) {
1391         // FIXME: Handle other CFGElement kinds.
1392         if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
1393           CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
1394           break;
1395         }
1396       }
1397     } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
1398                CurrBlock != &CFGraph->getExit()) {
1399       // The block is empty, and has a single predecessor. Use its exit
1400       // location.
1401       CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
1402           BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
1403     }
1404   }
1405 }
1406 
/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;

  ThreadSafetyHandler       &Handler;      // Diagnostic callbacks (not owned).
  LocalVariableMap          LocalVarMap;   // SSA-like map of local variables.
  FactManager               FactMan;       // Owns all lock facts.
  std::vector<CFGBlockInfo> BlockInfo;     // Per-CFG-block analysis state.

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}

  /// Add a lock fact to FSet; see definition below.
  void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat,
               StringRef DiagKind);
  /// Remove a lock fact from FSet, diagnosing mismatched unlocks.
  void removeLock(FactSet &FSet, const SExpr &Mutex, SourceLocation UnlockLoc,
                  bool FullyRemove, LockKind Kind, StringRef DiagKind);

  /// Collect the mutexes named by attribute Attr into Mtxs.
  template <typename AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D, VarDecl *SelfDecl = nullptr);

  /// Overload used on CFG edges (trylock branches); BrE/Neg describe the
  /// branch condition being followed.
  template <class AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  /// Compute the lockset on the edge PredBlock -> CurrBlock from PredBlock's
  /// exit set.
  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  /// Intersect FSet1 with FSet2 at a CFG join point, warning about locks
  /// held on only one path (LEK1/LEK2 select the diagnostics).
  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc,
                        LockErrorKind LEK1, LockErrorKind LEK2,
                        bool Modify=true);

  // Convenience overload: use the same error kind for both directions.
  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1,
                        bool Modify=true) {
    intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
  }

  /// Entry point: run the full analysis on one function.
  void runAnalysis(AnalysisDeclContext &AC);
};
1454 
1455 /// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
getValueDecl(const Expr * Exp)1456 static const ValueDecl *getValueDecl(const Expr *Exp) {
1457   if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
1458     return getValueDecl(CE->getSubExpr());
1459 
1460   if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
1461     return DR->getDecl();
1462 
1463   if (const auto *ME = dyn_cast<MemberExpr>(Exp))
1464     return ME->getMemberDecl();
1465 
1466   return nullptr;
1467 }
1468 
/// Compile-time trait: true iff type Ty has an args() member (an argument
/// range).  Used by the ClassifyDiagnostic overloads below to distinguish
/// attributes with a single argument from those with an argument list.
template <typename Ty>
class has_arg_iterator_range {
  typedef char yes[1];
  typedef char no[2];

  // Preferred overload: only viable when I->args() is well-formed (SFINAE).
  template <typename Inner>
  static yes& test(Inner *I, decltype(I->args()) * = nullptr);

  // Fallback overload, chosen when the one above is removed by SFINAE.
  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};
1483 
ClassifyDiagnostic(const CapabilityAttr * A)1484 static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
1485   return A->getName();
1486 }
1487 
ClassifyDiagnostic(QualType VDT)1488 static StringRef ClassifyDiagnostic(QualType VDT) {
1489   // We need to look at the declaration of the type of the value to determine
1490   // which it is. The type should either be a record or a typedef, or a pointer
1491   // or reference thereof.
1492   if (const auto *RT = VDT->getAs<RecordType>()) {
1493     if (const auto *RD = RT->getDecl())
1494       if (const auto *CA = RD->getAttr<CapabilityAttr>())
1495         return ClassifyDiagnostic(CA);
1496   } else if (const auto *TT = VDT->getAs<TypedefType>()) {
1497     if (const auto *TD = TT->getDecl())
1498       if (const auto *CA = TD->getAttr<CapabilityAttr>())
1499         return ClassifyDiagnostic(CA);
1500   } else if (VDT->isPointerType() || VDT->isReferenceType())
1501     return ClassifyDiagnostic(VDT->getPointeeType());
1502 
1503   return "mutex";
1504 }
1505 
/// \brief Diagnostic kind for a declared value, derived from the capability
/// attribute (if any) on the declaration of its type.
static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
  assert(VD && "No ValueDecl passed");

  // The ValueDecl is the declaration of a mutex or role (hopefully).
  return ClassifyDiagnostic(VD->getType());
}
1512 
1513 template <typename AttrTy>
1514 static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
1515                                StringRef>::type
ClassifyDiagnostic(const AttrTy * A)1516 ClassifyDiagnostic(const AttrTy *A) {
1517   if (const ValueDecl *VD = getValueDecl(A->getArg()))
1518     return ClassifyDiagnostic(VD);
1519   return "mutex";
1520 }
1521 
1522 template <typename AttrTy>
1523 static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
1524                                StringRef>::type
ClassifyDiagnostic(const AttrTy * A)1525 ClassifyDiagnostic(const AttrTy *A) {
1526   for (const auto *Arg : A->args()) {
1527     if (const ValueDecl *VD = getValueDecl(Arg))
1528       return ClassifyDiagnostic(VD);
1529   }
1530   return "mutex";
1531 }
1532 
1533 /// \brief Add a new lock to the lockset, warning if the lock is already there.
1534 /// \param Mutex -- the Mutex expression for the lock
1535 /// \param LDat  -- the LockData for the lock
addLock(FactSet & FSet,const SExpr & Mutex,const LockData & LDat,StringRef DiagKind)1536 void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
1537                                    const LockData &LDat, StringRef DiagKind) {
1538   // FIXME: deal with acquired before/after annotations.
1539   // FIXME: Don't always warn when we have support for reentrant locks.
1540   if (Mutex.shouldIgnore())
1541     return;
1542 
1543   if (FSet.findLock(FactMan, Mutex)) {
1544     if (!LDat.Asserted)
1545       Handler.handleDoubleLock(DiagKind, Mutex.toString(), LDat.AcquireLoc);
1546   } else {
1547     FSet.addLock(FactMan, Mutex, LDat);
1548   }
1549 }
1550 
1551 
/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param Mutex The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
/// \param FullyRemove When true (e.g. on destruction of a scoped lock), the
///        managing fact itself is removed along with its underlying mutex.
/// \param ReceivedKind The lock kind implied by the release annotation;
///        LK_Generic suppresses kind-mismatch checking.
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const SExpr &Mutex,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind,
                                      StringRef DiagKind) {
  if (Mutex.shouldIgnore())
    return;

  const LockData *LDat = FSet.findLock(FactMan, Mutex);
  if (!LDat) {
    // Releasing a lock that is not held.
    Handler.handleUnmatchedUnlock(DiagKind, Mutex.toString(), UnlockLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->LKind != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(DiagKind, Mutex.toString(), LDat->LKind,
                                      ReceivedKind, UnlockLoc);
    return;
  }

  if (LDat->UnderlyingMutex.isValid()) {
    // This is scoped lockable object, which manages the real mutex.
    if (FullyRemove) {
      // We're destroying the managing object.
      // Remove the underlying mutex if it exists; but don't warn.
      if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
        FSet.removeLock(FactMan, LDat->UnderlyingMutex);
      // Falls through: the scoped object's own fact is removed below.
    } else {
      // We're releasing the underlying mutex, but not destroying the
      // managing object.  Warn on dual release.
      if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
        Handler.handleUnmatchedUnlock(
            DiagKind, LDat->UnderlyingMutex.toString(), UnlockLoc);
      }
      FSet.removeLock(FactMan, LDat->UnderlyingMutex);
      // Early return: the scoped object's fact stays in the set so its
      // eventual destruction can still be matched.
      return;
    }
  }
  FSet.removeLock(FactMan, Mutex);
}
1596 
1597 
1598 /// \brief Extract the list of mutexIDs from the attribute on an expression,
1599 /// and push them onto Mtxs, discarding any duplicates.
1600 template <typename AttrType>
getMutexIDs(MutexIDList & Mtxs,AttrType * Attr,Expr * Exp,const NamedDecl * D,VarDecl * SelfDecl)1601 void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
1602                                        Expr *Exp, const NamedDecl *D,
1603                                        VarDecl *SelfDecl) {
1604   if (Attr->args_size() == 0) {
1605     // The mutex held is the "this" object.
1606     SExpr Mu(nullptr, Exp, D, SelfDecl);
1607     if (!Mu.isValid())
1608       SExpr::warnInvalidLock(Handler, nullptr, Exp, D,
1609                              ClassifyDiagnostic(Attr));
1610     else
1611       Mtxs.push_back_nodup(Mu);
1612     return;
1613   }
1614 
1615   for (const auto *Arg : Attr->args()) {
1616     SExpr Mu(Arg, Exp, D, SelfDecl);
1617     if (!Mu.isValid())
1618       SExpr::warnInvalidLock(Handler, Arg, Exp, D, ClassifyDiagnostic(Attr));
1619     else
1620       Mtxs.push_back_nodup(Mu);
1621   }
1622 }
1623 
1624 
/// \brief Extract the list of mutexIDs from a trylock attribute.  If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
/// \param BrE The attribute's success-value expression (may be null).
/// \param Neg True if the trylock call appeared under a negation.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  // A null or non-literal success expression is treated as false.
  bool branch = false;
  if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  // Map the success value to a successor index (0 taken as the "true" edge),
  // flipping it when the condition was negated.
  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  // Only the first two successors (the branch edges) are considered.
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}
1653 
1654 
getStaticBooleanValue(Expr * E,bool & TCond)1655 bool getStaticBooleanValue(Expr* E, bool& TCond) {
1656   if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
1657     TCond = false;
1658     return true;
1659   } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1660     TCond = BLE->getValue();
1661     return true;
1662   } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
1663     TCond = ILE->getValue().getBoolValue();
1664     return true;
1665   } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
1666     return getStaticBooleanValue(CE->getSubExpr(), TCond);
1667   }
1668   return false;
1669 }
1670 
1671 
// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
    // Found the call itself.
    return CallExp;
  }
  else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
    // Look through parentheses.
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  }
  else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
    // Look through implicit casts.
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  }
  else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
    // Look through temporary-cleanup wrappers.
    return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
  }
  else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    // A local variable: trace it back to its defining expression in the
    // current context, e.g. "bool ok = mu.TryLock(); if (ok) ...".
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      // Logical negation flips the branch sense.
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      // Comparison against a statically-known boolean on either side:
      // fold the constant (and the ==/!= sense) into Negate and recurse
      // on the other operand.
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr) {
      // As with &&, only the RHS belongs to this block's condition.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    return nullptr;
  }
  return nullptr;
}
1732 
1733 
/// \brief Find the lockset that holds on the edge between PredBlock
/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  // Start from the predecessor's exit lockset.
  Result = ExitSet;

  // No terminator condition means no branch, hence no trylock on this edge.
  const Stmt *Cond = PredBlock->getTerminatorCondition();
  if (!Cond)
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
  StringRef CapDiagKind = "mutex";

  // Trace the branch condition back to a call expression, tracking whether
  // the result was negated along the way.
  CallExpr *Exp =
    const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
  if (!Exp)
    return;

  NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if(!FunDecl || !FunDecl->hasAttrs())
    return;

  MutexIDList ExclusiveLocksToAdd;
  MutexIDList SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (auto *Attr : FunDecl->getAttrs()) {
    switch (Attr->getKind()) {
      case attr::ExclusiveTrylockFunction: {
        ExclusiveTrylockFunctionAttr *A =
          cast<ExclusiveTrylockFunctionAttr>(Attr);
        // getMutexIDs only adds the locks if this edge is the
        // successful-trylock branch.
        getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }
      case attr::SharedTrylockFunction: {
        SharedTrylockFunctionAttr *A =
          cast<SharedTrylockFunctionAttr>(Attr);
        getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }
      default:
        break;
    }
  }

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
            CapDiagKind);
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, SharedLockToAdd, LockData(Loc, LK_Shared), CapDiagKind);
}
1796 
/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  // Owning analyzer; provides the fact manager, diagnostics handler, and
  // local-variable map used by the helpers below.
  ThreadSafetyAnalyzer *Analyzer;
  // The lockset being built for the current basic block, seeded from the
  // block's entry set.
  FactSet FSet;
  // Current mapping of local variables to their defining expressions.
  LocalVariableMap::Context LVarCtx;
  // Position counter used to advance LVarCtx as statements are visited.
  unsigned CtxIndex;

  // Helper functions

  void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK,
                          StringRef DiagKind);
  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
                       StringRef DiagKind);

  // Check a read/write of a possibly-guarded location, or of a guarded
  // pointer's pointee, respectively.
  void checkAccess(const Expr *Exp, AccessKind AK);
  void checkPtAccess(const Expr *Exp, AccessKind AK);

  void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);

public:
  // Seed visitor state from the block's entry lockset and entry context.
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
    : StmtVisitor<BuildLockset>(),
      Analyzer(Anlzr),
      FSet(Info.EntrySet),
      LVarCtx(Info.EntryContext),
      CtxIndex(Info.EntryIndex)
  {}

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCallExpr(CallExpr *Exp);
  void VisitCXXConstructExpr(CXXConstructExpr *Exp);
  void VisitDeclStmt(DeclStmt *S);
};
1839 
/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK,
                                      StringRef DiagKind) {
  // Minimal lock kind required for this kind of access.
  LockKind LK = getLockKindFromAccessKind(AK);

  SExpr Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid()) {
    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
    return;
  } else if (Mutex.shouldIgnore()) {
    return;
  }

  // Look up the mutex in the current lockset (findLockUniv also accepts a
  // universal match).
  LockData* LDat = FSet.findLockUniv(Analyzer->FactMan, Mutex);
  bool NoError = true;
  if (!LDat) {
    // No exact match found.  Look for a partial match.
    FactEntry* FEntry = FSet.findPartialMatch(Analyzer->FactMan, Mutex);
    if (FEntry) {
      // Warn that there's no precise match.
      LDat = &FEntry->LDat;
      // Keep the string alive while PartMatchName references it.
      std::string PartMatchStr = FEntry->MutID.toString();
      StringRef   PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
                                           LK, Exp->getExprLoc(),
                                           &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(),
                                           LK, Exp->getExprLoc());
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK))
    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Mutex.toString(), LK,
                                         Exp->getExprLoc());
}
1881 
1882 /// \brief Warn if the LSet contains the given lock.
warnIfMutexHeld(const NamedDecl * D,const Expr * Exp,Expr * MutexExp,StringRef DiagKind)1883 void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
1884                                    Expr *MutexExp,
1885                                    StringRef DiagKind) {
1886   SExpr Mutex(MutexExp, Exp, D);
1887   if (!Mutex.isValid()) {
1888     SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D, DiagKind);
1889     return;
1890   }
1891 
1892   LockData* LDat = FSet.findLock(Analyzer->FactMan, Mutex);
1893   if (LDat)
1894     Analyzer->Handler.handleFunExcludesLock(
1895         DiagKind, D->getNameAsString(), Mutex.toString(), Exp->getExprLoc());
1896 }
1897 
/// \brief Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK) {
  Exp = Exp->IgnoreParenCasts();

  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences
    if (UO->getOpcode() == clang::UO_Deref)
      checkPtAccess(UO->getSubExpr(), AK);
    return;
  }

  if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    // Array indexing is treated as a dereference of the array/pointer base.
    checkPtAccess(AE->getLHS(), AK);
    return;
  }

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(ME->getBase(), AK);
    else
      checkAccess(ME->getBase(), AK);
    // No return: after checking the base object, fall through to also check
    // attributes on the member declaration itself (getValueDecl below yields
    // the member for a MemberExpr).
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  // guarded_var: flag any access performed with an empty lockset.
  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty())
    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarAccess, AK,
                                        Exp->getExprLoc());

  // guarded_by(mu): the named mutex must be held for this access.
  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarAccess,
                       ClassifyDiagnostic(I));
}
1937 
1938 /// \brief Checks pt_guarded_by and pt_guarded_var attributes.
checkPtAccess(const Expr * Exp,AccessKind AK)1939 void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK) {
1940   while (true) {
1941     if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
1942       Exp = PE->getSubExpr();
1943       continue;
1944     }
1945     if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
1946       if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1947         // If it's an actual array, and not a pointer, then it's elements
1948         // are protected by GUARDED_BY, not PT_GUARDED_BY;
1949         checkAccess(CE->getSubExpr(), AK);
1950         return;
1951       }
1952       Exp = CE->getSubExpr();
1953       continue;
1954     }
1955     break;
1956   }
1957 
1958   const ValueDecl *D = getValueDecl(Exp);
1959   if (!D || !D->hasAttrs())
1960     return;
1961 
1962   if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty())
1963     Analyzer->Handler.handleNoMutexHeld("mutex", D, POK_VarDereference, AK,
1964                                         Exp->getExprLoc());
1965 
1966   for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1967     warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK_VarDereference,
1968                        ClassifyDiagnostic(I));
1969 }
1970 
1971 /// \brief Process a function call, method call, constructor call,
1972 /// or destructor call.  This involves looking at the attributes on the
1973 /// corresponding function/method/constructor/destructor, issuing warnings,
1974 /// and updating the locksets accordingly.
1975 ///
1976 /// FIXME: For classes annotated with one of the guarded annotations, we need
1977 /// to treat const method calls as reads and non-const method calls as writes,
1978 /// and check that the appropriate locks are held. Non-const method calls with
1979 /// the same signature as const method calls can be also treated as reads.
1980 ///
handleCall(Expr * Exp,const NamedDecl * D,VarDecl * VD)1981 void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
1982   SourceLocation Loc = Exp->getExprLoc();
1983   const AttrVec &ArgAttrs = D->getAttrs();
1984   MutexIDList ExclusiveLocksToAdd, SharedLocksToAdd;
1985   MutexIDList ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
1986   StringRef CapDiagKind = "mutex";
1987 
1988   for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
1989     Attr *At = const_cast<Attr*>(ArgAttrs[i]);
1990     switch (At->getKind()) {
1991       // When we encounter a lock function, we need to add the lock to our
1992       // lockset.
1993       case attr::AcquireCapability: {
1994         auto *A = cast<AcquireCapabilityAttr>(At);
1995         Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
1996                                             : ExclusiveLocksToAdd,
1997                               A, Exp, D, VD);
1998 
1999         CapDiagKind = ClassifyDiagnostic(A);
2000         break;
2001       }
2002 
2003       // An assert will add a lock to the lockset, but will not generate
2004       // a warning if it is already there, and will not generate a warning
2005       // if it is not removed.
2006       case attr::AssertExclusiveLock: {
2007         AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);
2008 
2009         MutexIDList AssertLocks;
2010         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
2011         for (const auto &AssertLock : AssertLocks)
2012           Analyzer->addLock(FSet, AssertLock,
2013                             LockData(Loc, LK_Exclusive, false, true),
2014                             ClassifyDiagnostic(A));
2015         break;
2016       }
2017       case attr::AssertSharedLock: {
2018         AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);
2019 
2020         MutexIDList AssertLocks;
2021         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
2022         for (const auto &AssertLock : AssertLocks)
2023           Analyzer->addLock(FSet, AssertLock,
2024                             LockData(Loc, LK_Shared, false, true),
2025                             ClassifyDiagnostic(A));
2026         break;
2027       }
2028 
2029       // When we encounter an unlock function, we need to remove unlocked
2030       // mutexes from the lockset, and flag a warning if they are not there.
2031       case attr::ReleaseCapability: {
2032         auto *A = cast<ReleaseCapabilityAttr>(At);
2033         if (A->isGeneric())
2034           Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
2035         else if (A->isShared())
2036           Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
2037         else
2038           Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);
2039 
2040         CapDiagKind = ClassifyDiagnostic(A);
2041         break;
2042       }
2043 
2044       case attr::RequiresCapability: {
2045         RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
2046         for (auto *Arg : A->args())
2047           warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
2048                              POK_FunctionCall, ClassifyDiagnostic(A));
2049         break;
2050       }
2051 
2052       case attr::LocksExcluded: {
2053         LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
2054         for (auto *Arg : A->args())
2055           warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
2056         break;
2057       }
2058 
2059       // Ignore attributes unrelated to thread-safety
2060       default:
2061         break;
2062     }
2063   }
2064 
2065   // Figure out if we're calling the constructor of scoped lockable class
2066   bool isScopedVar = false;
2067   if (VD) {
2068     if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
2069       const CXXRecordDecl* PD = CD->getParent();
2070       if (PD && PD->hasAttr<ScopedLockableAttr>())
2071         isScopedVar = true;
2072     }
2073   }
2074 
2075   // Add locks.
2076   for (const auto &M : ExclusiveLocksToAdd)
2077     Analyzer->addLock(FSet, M, LockData(Loc, LK_Exclusive, isScopedVar),
2078                       CapDiagKind);
2079   for (const auto &M : SharedLocksToAdd)
2080     Analyzer->addLock(FSet, M, LockData(Loc, LK_Shared, isScopedVar),
2081                       CapDiagKind);
2082 
2083   // Add the managing object as a dummy mutex, mapped to the underlying mutex.
2084   // FIXME -- this doesn't work if we acquire multiple locks.
2085   if (isScopedVar) {
2086     SourceLocation MLoc = VD->getLocation();
2087     DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
2088     SExpr SMutex(&DRE, nullptr, nullptr);
2089 
2090     for (const auto &M : ExclusiveLocksToAdd)
2091       Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive, M),
2092                         CapDiagKind);
2093     for (const auto &M : SharedLocksToAdd)
2094       Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared, M),
2095                         CapDiagKind);
2096   }
2097 
2098   // Remove locks.
2099   // FIXME -- should only fully remove if the attribute refers to 'this'.
2100   bool Dtor = isa<CXXDestructorDecl>(D);
2101   for (const auto &M : ExclusiveLocksToRemove)
2102     Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
2103   for (const auto &M : SharedLocksToRemove)
2104     Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
2105   for (const auto &M : GenericLocksToRemove)
2106     Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
2107 }
2108 
2109 
2110 /// \brief For unary operations which read and write a variable, we need to
2111 /// check whether we hold any required mutexes. Reads are checked in
2112 /// VisitCastExpr.
VisitUnaryOperator(UnaryOperator * UO)2113 void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
2114   switch (UO->getOpcode()) {
2115     case clang::UO_PostDec:
2116     case clang::UO_PostInc:
2117     case clang::UO_PreDec:
2118     case clang::UO_PreInc: {
2119       checkAccess(UO->getSubExpr(), AK_Written);
2120       break;
2121     }
2122     default:
2123       break;
2124   }
2125 }
2126 
2127 /// For binary operations which assign to a variable (writes), we need to check
2128 /// whether we hold any required mutexes.
2129 /// FIXME: Deal with non-primitive types.
VisitBinaryOperator(BinaryOperator * BO)2130 void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
2131   if (!BO->isAssignmentOp())
2132     return;
2133 
2134   // adjust the context
2135   LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
2136 
2137   checkAccess(BO->getLHS(), AK_Written);
2138 }
2139 
2140 
2141 /// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2142 /// need to ensure we hold any required mutexes.
2143 /// FIXME: Deal with non-primitive types.
VisitCastExpr(CastExpr * CE)2144 void BuildLockset::VisitCastExpr(CastExpr *CE) {
2145   if (CE->getCastKind() != CK_LValueToRValue)
2146     return;
2147   checkAccess(CE->getSubExpr(), AK_Read);
2148 }
2149 
2150 
/// Check implicit reads/writes of the receiver or operands performed by a
/// call, then process the callee's thread-safety attributes via handleCall.
void BuildLockset::VisitCallExpr(CallExpr *Exp) {
  if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer
    CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      // A method call accesses the object it is invoked on: via the pointer
      // for "->", directly for ".".
      if (ME->isArrow()) {
        if (MD->isConst()) {
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        } else {  // FIXME -- should be AK_Written
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        }
      } else {
        if (MD->isConst())
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
        else     // FIXME -- should be AK_Written
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }
  } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    switch (OE->getOperator()) {
      case OO_Equal: {
        // Overloaded assignment: target is written, source is read.
        const Expr *Target = OE->getArg(0);
        const Expr *Source = OE->getArg(1);
        checkAccess(Target, AK_Written);
        checkAccess(Source, AK_Read);
        break;
      }
      case OO_Star:
      case OO_Arrow:
      case OO_Subscript: {
        // Overloaded dereference-like operators read both the object and
        // its pointee.
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        checkPtAccess(Obj, AK_Read);
        break;
      }
      default: {
        // Any other overloaded operator reads its first operand.
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        break;
      }
    }
  }
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if(!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}
2200 
VisitCXXConstructExpr(CXXConstructExpr * Exp)2201 void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
2202   const CXXConstructorDecl *D = Exp->getConstructor();
2203   if (D && D->isCopyConstructor()) {
2204     const Expr* Source = Exp->getArg(0);
2205     checkAccess(Source, AK_Read);
2206   }
2207   // FIXME -- only handles constructors in DeclStmt below.
2208 }
2209 
/// Handle variable declarations: advance the local-variable context, and
/// treat attribute-carrying constructor initializers (e.g. scoped lock
/// guards) as calls.
void BuildLockset::VisitDeclStmt(DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  for (auto *D : S->getDeclGroup()) {
    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      // handle constructors that involve temporaries
      if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
        E = EWC->getSubExpr();

      if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
        NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        // NOTE(review): this 'return' abandons the rest of the decl group —
        // for "T a(...), b(...);" the later declarators are skipped whenever
        // an earlier constructor has no attributes. Confirm 'continue' was
        // not intended here.
        if (!CtorD || !CtorD->hasAttrs())
          return;
        handleCall(CE, CtorD, VD);
      }
    }
  }
}
2230 
2231 
2232 
2233 /// \brief Compute the intersection of two locksets and issue warnings for any
2234 /// locks in the symmetric difference.
2235 ///
2236 /// This function is used at a merge point in the CFG when comparing the lockset
2237 /// of each branch being merged. For example, given the following sequence:
2238 /// A; if () then B; else C; D; we need to check that the lockset after B and C
2239 /// are the same. In the event of a difference, we use the intersection of these
2240 /// two locksets at the start of D.
2241 ///
2242 /// \param FSet1 The first lockset.
2243 /// \param FSet2 The second lockset.
2244 /// \param JoinLoc The location of the join point for error reporting
/// \param LEK1 The error message to report if a mutex is missing from FSet1
/// \param LEK2 The error message to report if a mutex is missing from FSet2
/// \param Modify If true, FSet1 is updated in place with the join result
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
                                            const FactSet &FSet2,
                                            SourceLocation JoinLoc,
                                            LockErrorKind LEK1,
                                            LockErrorKind LEK2,
                                            bool Modify) {
  // Snapshot FSet1 before any in-place modification so the second pass
  // iterates over the original contents.
  FactSet FSet1Orig = FSet1;

  // Find locks in FSet2 that conflict or are not in FSet1, and warn.
  for (const auto &Fact : FSet2) {
    const SExpr &FSet2Mutex = FactMan[Fact].MutID;
    const LockData &LDat2 = FactMan[Fact].LDat;
    FactSet::iterator I1 = FSet1.findLockIter(FactMan, FSet2Mutex);

    if (I1 != FSet1.end()) {
      // Held in both sets: check for shared-vs-exclusive kind mismatch.
      const LockData* LDat1 = &FactMan[*I1].LDat;
      if (LDat1->LKind != LDat2.LKind) {
        Handler.handleExclusiveAndShared("mutex", FSet2Mutex.toString(),
                                         LDat2.AcquireLoc, LDat1->AcquireLoc);
        if (Modify && LDat1->LKind != LK_Exclusive) {
          // Take the exclusive lock, which is the one in FSet2.
          *I1 = Fact;
        }
      }
      else if (LDat1->Asserted && !LDat2.Asserted) {
        // The non-asserted lock in FSet2 is the one we want to track.
        *I1 = Fact;
      }
    } else {
      // Held only in FSet2: report it as missing from FSet1 (LEK1),
      // unless the fact is managed, universal, or asserted.
      if (LDat2.UnderlyingMutex.isValid()) {
        if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) {
          // If this is a scoped lock that manages another mutex, and if the
          // underlying mutex is still held, then warn about the underlying
          // mutex.
          Handler.handleMutexHeldEndOfScope("mutex",
                                            LDat2.UnderlyingMutex.toString(),
                                            LDat2.AcquireLoc, JoinLoc, LEK1);
        }
      }
      else if (!LDat2.Managed && !FSet2Mutex.isUniversal() && !LDat2.Asserted)
        Handler.handleMutexHeldEndOfScope("mutex", FSet2Mutex.toString(),
                                          LDat2.AcquireLoc, JoinLoc, LEK1);
    }
  }

  // Find locks in FSet1 that are not in FSet2, and remove them.
  for (const auto &Fact : FSet1Orig) {
    const SExpr &FSet1Mutex = FactMan[Fact].MutID;
    const LockData &LDat1 = FactMan[Fact].LDat;

    if (!FSet2.findLock(FactMan, FSet1Mutex)) {
      if (LDat1.UnderlyingMutex.isValid()) {
        if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) {
          // If this is a scoped lock that manages another mutex, and if the
          // underlying mutex is still held, then warn about the underlying
          // mutex.
          // NOTE(review): this branch reports with LEK1 while the sibling
          // branch below uses LEK2, even though both describe a lock missing
          // from FSet2 — possibly a copy-paste asymmetry; confirm intent.
          Handler.handleMutexHeldEndOfScope("mutex",
                                            LDat1.UnderlyingMutex.toString(),
                                            LDat1.AcquireLoc, JoinLoc, LEK1);
        }
      }
      else if (!LDat1.Managed && !FSet1Mutex.isUniversal() && !LDat1.Asserted)
        Handler.handleMutexHeldEndOfScope("mutex", FSet1Mutex.toString(),
                                          LDat1.AcquireLoc, JoinLoc, LEK2);
      // Keep only the intersection when joining.
      if (Modify)
        FSet1.removeLock(FactMan, FSet1Mutex);
    }
  }
}
2316 
2317 
2318 // Return true if block B never continues to its successors.
neverReturns(const CFGBlock * B)2319 inline bool neverReturns(const CFGBlock* B) {
2320   if (B->hasNoReturnElement())
2321     return true;
2322   if (B->empty())
2323     return false;
2324 
2325   CFGElement Last = B->back();
2326   if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2327     if (isa<CXXThrowExpr>(S->getStmt()))
2328       return true;
2329   }
2330   return false;
2331 }
2332 
2333 
2334 /// \brief Check a function's CFG for thread-safety violations.
2335 ///
2336 /// We traverse the blocks in the CFG, compute the set of mutexes that are held
2337 /// at the end of each block, and issue warnings for thread safety violations.
2338 /// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();

  // Functions explicitly opted out of analysis are skipped entirely.
  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access is disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  // One CFGBlockInfo slot per CFG block, indexed by block ID.
  BlockInfo.resize(CFGraph->getNumBlockIDs(),
    CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Mark entry block as reachable
  BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  // Locks declared acquired/released by this function's attributes; used to
  // adjust the expected exit set at the end of the analysis.
  MutexIDList ExclusiveLocksAcquired;
  MutexIDList SharedLocksAcquired;
  MutexIDList LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
    const AttrVec &ArgAttrs = D->getAttrs();

    MutexIDList ExclusiveLocksToAdd;
    MutexIDList SharedLocksToAdd;
    StringRef CapDiagKind = "mutex";

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : ArgAttrs) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        // FIXME -- deal with exclusive vs. shared unlock functions?
        getMutexIDs(ExclusiveLocksToAdd, A, nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now
        return;
      }
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
      addLock(InitialLockset, ExclusiveLockToAdd, LockData(Loc, LK_Exclusive),
              CapDiagKind);
    for (const auto &SharedLockToAdd : SharedLocksToAdd)
      addLock(InitialLockset, SharedLockToAdd, LockData(Loc, LK_Shared),
              CapDiagKind);
  }

  // Main dataflow pass: visit blocks in topological order, joining the
  // predecessors' exit locksets to form each block's entry lockset.
  for (const auto *CurrBlock : *SortedGraph) {
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    SmallVector<CFGBlock *, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {

      // if *PI -> CurrBlock is a back edge
      // (a predecessor not yet visited in topological order is upstream of a
      // loop; back edges are handled after the block is processed, below).
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor. In that case, keep the other
      // predecessor's lockset.
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         CurrBlockInfo->EntryLoc,
                         LEK_LockedSomePredecessors);
      }
    }

    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    // Process continue and break blocks. Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (const auto *PrevBlock : SpecialBlocks) {
      int PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes. FIXME: A 'break' statement might be a loop terminator, but
        // it might also be part of a switch. Also, a subsequent destructor
        // might add to the lockset, in which case the real issue might be a
        // double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);

        FactSet PrevLockset;
        getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
                       PrevBlock, CurrBlock);

        // Do not update EntrySet.
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         PrevBlockInfo->ExitLoc,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors,
                         false);
      }
    }

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);

    // Visit all the statements in the basic block.
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI->castAs<CFGStmt>();
          LocksetBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          CFGAutomaticObjDtor AD = BI->castAs<CFGAutomaticObjDtor>();
          CXXDestructorDecl *DD = const_cast<CXXDestructorDecl *>(
              AD.getDestructorDecl(AC.getASTContext()));
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression,
          // so the destructor can be treated like an ordinary method call
          // on the variable being destroyed.
          VarDecl *VD = const_cast<VarDecl*>(AD.getVarDecl());
          DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue,
                          AD.getTriggerStmt()->getLocEnd());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {

      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
                       PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations,
                       false);
    }
  }

  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final   = &BlockInfo[CFGraph->getExit().getBlockID()];

  // Skip the final check if the exit block is unreachable.
  if (!Final->Reachable)
    return;

  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedExitSet = Initial->EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION.  The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedExitSet.addLock(FactMan, Lock,
                            LockData(D->getLocation(), LK_Exclusive));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedExitSet.addLock(FactMan, Lock,
                            LockData(D->getLocation(), LK_Shared));
  for (const auto &Lock : LocksReleased)
    ExpectedExitSet.removeLock(FactMan, Lock);

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedExitSet, Final->ExitSet,
                   Final->ExitLoc,
                   LEK_LockedAtEndOfFunction,
                   LEK_NotLockedAtEndOfFunction,
                   false);
}
2620 
2621 } // end anonymous namespace
2622 
2623 
2624 namespace clang {
2625 namespace thread_safety {
2626 
2627 /// \brief Check a function's CFG for thread-safety violations.
2628 ///
2629 /// We traverse the blocks in the CFG, compute the set of mutexes that are held
2630 /// at the end of each block, and issue warnings for thread safety violations.
2631 /// Each block in the CFG is traversed exactly once.
runThreadSafetyAnalysis(AnalysisDeclContext & AC,ThreadSafetyHandler & Handler)2632 void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2633                              ThreadSafetyHandler &Handler) {
2634   ThreadSafetyAnalyzer Analyzer(Handler);
2635   Analyzer.runAnalysis(AC);
2636 }
2637 
2638 /// \brief Helper function that returns a LockKind required for the given level
2639 /// of access.
getLockKindFromAccessKind(AccessKind AK)2640 LockKind getLockKindFromAccessKind(AccessKind AK) {
2641   switch (AK) {
2642     case AK_Read :
2643       return LK_Shared;
2644     case AK_Written :
2645       return LK_Exclusive;
2646   }
2647   llvm_unreachable("Unknown AccessKind");
2648 }
2649 
2650 }} // end namespace clang::thread_safety
2651