//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "ExprEngine"

#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  const CFG *CalleeCFG = calleeCtx->getCFG();
  const CFGBlock *Entry = &(CalleeCFG->getEntry());

  // Validate the CFG.
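  // In Clang's CFG the entry block is a synthetic, empty block whose single
  // successor is the first real block of the function; the two asserts below
  // check exactly that.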
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node and add it to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew)
    Engine.getWorkList()->enqueue(Node);
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = 0;
  const StackFrameContext *SF =
          Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP)) {
        S = SP->getStmt();
        break;
      } else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&PP)) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        const CallEnter *CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      }
    } else if (const CallEnter *CE = dyn_cast<CallEnter>(&PP)) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    Node = *Node->pred_begin();
  }

  const CFGBlock *Blk = 0;
  if (S) {
    // Now, get the enclosing basic block.
    while (Node && Node->pred_size() >= 1) {
      const ProgramPoint &PP = Node->getLocation();
      if (isa<BlockEdge>(PP) &&
          (PP.getLocationContext()->getCurrentStackFrame() == SF)) {
        const BlockEdge &EPP = cast<BlockEdge>(PP);
        Blk = EPP.getDst();
        break;
      }
      Node = *Node->pred_begin();
    }
  }

  return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run RemoveDeadBindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.

  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);
      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // Always bind the region to the CXXConstructExpr.
      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the symbol reaper with a null statement and the caller
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use LastSt as a diagnostic statement,
    // with which the purge-dead-symbols point will be associated.
    removeDead(BindedRetNode, CleanedNodes, 0, callerCtx, LastSt,
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = 0;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
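    // If RemoveDeadBindings was skipped, the cleaned node is CEBNode itself,
    // whose state predates the return-value binding; use the updated 'state'.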
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
        &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
                                   PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

static unsigned getNumberStackFrames(const LocationContext *LCtx) {
  unsigned count = 0;
  while (LCtx) {
    if (isa<StackFrameContext>(LCtx))
      ++count;
    LCtx = LCtx->getParent();
  }
  return count;
}

static bool IsInStdNamespace(const FunctionDecl *FD) {
  const DeclContext *DC = FD->getEnclosingNamespaceContext();
  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
  if (!ND)
    return false;

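  // Walk up to the outermost enclosing namespace, so that declarations in
  // nested namespaces (e.g. std::tr1) are still recognized as part of 'std'.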
  while (const DeclContext *Parent = ND->getParent()) {
    if (!isa<NamespaceDecl>(Parent))
      break;
    ND = cast<NamespaceDecl>(Parent);
  }

  return ND->getName() == "std";
}

// Determine if we should inline the call.
bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const CFG *CalleeCFG = CalleeADC->getCFG();

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  if (!CalleeCFG)
    return false;

  if (getNumberStackFrames(Pred->getLocationContext())
        == AMgr.options.InlineMaxStackDepth)
    return false;

  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
    return false;

  if (CalleeCFG->getNumBlockIDs() > AMgr.options.InlineMaxFunctionSize)
    return false;

  // Do not inline variadic calls (for now).
  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    if (BD->isVariadic())
      return false;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->isVariadic())
      return false;
  }

  if (getContext().getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      // Conditionally allow the inlining of template functions.
      if (!getAnalysisManager().options.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally allow the inlining of C++ standard library functions.
      if (!getAnalysisManager().options.mayInlineCXXStandardLibrary())
        if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;
    }
  }

  // It is possible that the live variables analysis cannot be
  // run.  If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

/// The GDM component containing the dynamic dispatch bifurcation info. When
/// the exact type of the receiver is not known, we want to explore both paths:
/// one on which we inline the call and one on which we don't. This is done to
/// ensure we do not drop coverage.
/// This is a map from the receiver region to a bool, specifying whether we
/// consider this region's information to be precise along the given path.
namespace clang {
namespace ento {
enum DynamicDispatchMode { DynamicDispatchModeInlined = 1,
                           DynamicDispatchModeConservative };

struct DynamicDispatchBifurcationMap {};
typedef llvm::ImmutableMap<const MemRegion*,
                           unsigned int> DynamicDispatchBifur;
template<> struct ProgramStateTrait<DynamicDispatchBifurcationMap>
    :  public ProgramStatePartialTrait<DynamicDispatchBifur> {
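  // The address of this function-local static serves as the unique key for
  // this trait's data in the state's generic data map (GDM).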
  static void *GDMIndex() { static int index; return &index; }
};

}}

bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  const AnalyzerOptions &Opts = getAnalysisManager().options;

  // FIXME: Refactor this check into a hypothetical CallEvent::canInline.
  switch (Call.getKind()) {
  case CE_Function:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return false;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return false;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return false;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return false;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
    if (!(getAnalysisManager().options.IPAMode == DynamicDispatch ||
          getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate))
      return false;
    break;
  }

  if (!shouldInlineDecl(D, Pred))
    return false;

  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);
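  // The call-site block and statement index stored in this frame are what
  // allow processCallExit to resume the caller at the correct point.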

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list, so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}
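
/// If this call site was marked for replay without inlining, strip that
/// marker from the state and return the result; otherwise return null.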
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return 0;

  assert(ReplayState == (const void*)CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
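      // For example, the value of [obj retain] is obj itself.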
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    return State->BindExpr(E, LCtx, C->getCXXThisVal());
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
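  // The block count tags the conjured symbol so that values conjured on
  // separate visits to the same call site remain distinct.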
  SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred, ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  if (!getAnalysisManager().shouldInlineCall()) {
    conservativeEvalCall(*Call, Bldr, Pred, State);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();
  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);

  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (D) {
      if (RD.mayHaveOtherDefinitions()) {
        // Explore with and without inlining the call.
        if (getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (getAnalysisManager().options.IPAMode != DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned int *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline" path, keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
  return;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {

  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
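
  // The return value expression has already been evaluated; only add a node
  // for the ReturnStmt itself when there is a value being returned.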
  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}
700