//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
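  // Only begin analyzing the callee if this entry node is new; if it already
  // existed, the callee has been explored from an identical state before.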
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF =
      Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastSt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
                                            const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// Returns true if the CXXConstructExpr \p E was intended to construct a
/// prvalue for the region in \p V.
///
/// Note that we can't just test for rvalue vs. glvalue because
/// CXXConstructExprs embedded in DeclStmts and initializers are considered
/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
  if (E->isGLValue())
    return false;

  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return false;

  return isa<CXXTempObjectRegion>(MR);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of call exit sequence)
/// 2. Bind the return value
/// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
      calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
            CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
          svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a temporary prvalue, get its bindings.
      if (isTemporaryPRValue(CCE, ThisV))
        ThisV = state->getSVal(ThisV.castAs<Loc>());

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
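    // If this node already existed in the graph, another path has reached the
    // same program point with the same state, and the rest of the call-exit
    // sequence has already been handled there.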
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
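    // If remove-dead was skipped above, the cleaned node is CEBNode itself and
    // the return-value binding only lives in the local 'state'; use that
    // instead of the node's older attached state.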
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
                                                      &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

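    // Rewrap the call event with the cleaned state so the post-call checkers
    // see the bindings that survived removeDead.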
    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

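/// Walk up the location-context chain from \p LCtx, detecting whether \p D is
/// already on the stack (recursion) and counting the stack frames whose
/// functions are large enough to count toward the inlining depth limit.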
void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is the map from the receiver region to a bool, specifying whether we
// consider this region's information precise along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
                                                             unsigned))

bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
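  // Calls through a block are entered via a block invocation context (keyed
  // on the block's data region) so that captured variables can be resolved;
  // all other calls simply use the caller's stack frame as the parent context.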
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE,
                               currBldrCtx->getBlock(),
                               currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

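/// If this call site is currently being replayed without inlining (because an
/// earlier inlined exploration of it was abandoned), return a state with the
/// ReplayWithoutInlining marker removed; otherwise return null.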
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();

    // If the constructed object is a temporary prvalue, get its bindings.
    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
      ThisV = State->getSVal(ThisV.castAs<Loc>());

    return State->BindExpr(E, LCtx, ThisV);
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
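  // The block count keeps conjured symbols distinct across repeated visits to
  // the same call site (e.g. in a loop).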
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

enum CallInlinePolicy {
  CIP_Allowed,
  CIP_DisallowedOnce,
  CIP_DisallowedAlways
};

static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
                                          const ExplodedNode *Pred,
                                          AnalyzerOptions &Opts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXAllocator:
    if (Opts.mayInlineCXXAllocator())
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

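  // Also search base classes; ambiguity detection, path recording, and
  // virtual-base detection are all unnecessary for a simple existence check.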
  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(
          [DeclName](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
            return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, DeclName);
          },
          Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
                          AnalyzerOptions &Opts) {
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.mayInlineCXXStandardLibrary())
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.mayInlineCXXContainerMethods())
        if (!Ctx.getSourceManager().isInMainFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++ shared_ptr.
      // We don't currently do a good job modeling shared_ptr because we can't
      // see the reference count, so treating it as opaque is probably the best
      // idea.
      if (!Opts.mayInlineCXXSharedPtrDtor())
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline large functions.
  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // Temporary object destructor processing is currently broken, so we never
  // inline temporary destructors.
  // FIXME: Remove this once temp destructors are working.
  if (isa<CXXDestructorCall>(Call)) {
    if ((*currBldrCtx->getBlock())[currStmtIdx].getAs<CFGTemporaryDtor>())
      return false;
  }

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC, Opts)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  const CFG *CalleeCFG = CalleeADC->getCFG();

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
        || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
         Opts.getMaxTimesInlineLarge()) &&
       CalleeCFG->getNumBlockIDs() >=
       Opts.getMinCFGSizeTreatFunctionsAsLarge()) {
    NumReachedInlineCountMax++;
    return false;
  }

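  // In "minimal" inlining mode, only small, non-recursive callees are inlined.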
  if (HowToInline == Inline_Minimal &&
      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
      || IsRecursive))
    return false;

  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  return true;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inline failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

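  // If there is no return value, the pre-visit nodes simply flow through to
  // Dst via the builder; nothing else needs to be done here.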
  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}