//===-- ExprEngineCallAndReturn.cpp - Support for call/return ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
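///
/// For illustration, a sketch (not code from this file) of a covariant
/// return that needs such an adjustment:
///   struct Base { virtual Base *clone(); };
///   struct Derived : Base { Derived *clone() override; };
/// If a call through a Base pointer is devirtualized to Derived::clone(),
/// the returned Derived* must be cast back to Base* so that it matches the
/// type of the call expression in the caller.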
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!isa<Loc>(V))
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that this case never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

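/// Returns true if the runtime definition that was actually inlined for this
/// call (e.g. an override found through devirtualization) is a different
/// declaration than the one statically associated with the call site.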
static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove dead bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
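///
/// For example, a sketch (not code from this file): given
///   int callee() { return 42; }
///   void caller() { int x = callee(); }
/// the exit from the inlined callee() proceeds as CallExitBegin, binding 42
/// to the CallExpr, purging the callee's dead bindings, CallExitEnd (back in
/// the caller's context), and finally the PostStmt<CallExpr> checks.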
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If this variable is set to 'true', the analyzer will evaluate the call
  // statement we are about to exit again, instead of continuing the execution
  // from the statement after the call. This is useful for non-POD type array
  // construction, where the CXXConstructExpr is referenced only once in the
  // CFG, but we want to evaluate it as many times as the array has elements.
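  //
  // For example, for 'S arr[3];' there is a single CXXConstructExpr in the
  // CFG, but the constructor of S must be evaluated three times, once per
  // array element.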
  bool ShouldRepeatCall = false;

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);

      ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);

      if (!ShouldRepeatCall) {
        if (getIndexOfElementToConstruct(state, CCE, callerCtx))
          state = removeIndexOfElementToConstruct(state, CCE, callerCtx);

        if (getPendingInitLoop(state, CCE, callerCtx))
          state = removePendingInitLoop(state, CCE, callerCtx);
      }
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which they
  // occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
      for (ExplodedNode *I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
            /*wasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
    }
    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*wasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*wasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);

      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx);
    }
  }
}

bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
  // When there are no branches in the function, it means that there's no
  // exponential complexity introduced by inlining such a function.
  // Such functions also don't trigger various fundamental problems
  // with our inlining mechanism, such as the problem of
  // inlined defensive checks. Hence isLinear().
  const CFG *Cfg = ADC->getCFG();
  return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
}

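// A function counts as "large" once its CFG has at least
// MinCFGSizeTreatFunctionsAsLarge blocks; shouldInlineCall() stops inlining
// such functions after roughly MaxTimesInlineLarge inlinings.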
bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
}

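// A function is "huge" when its CFG has more than MaxInlinableSize blocks;
// mayInlineDecl() never allows inlining huge functions.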
bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      if (!isSmall(CalleeADC))
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is a map from the receiver region to a flag, specifying whether we
// consider this region's information precise or not along the given path.
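//
// For example, a sketch: for an Objective-C message 'ret = [obj foo];' where
// the dynamic type of 'obj' is not known precisely, one path inlines the
// candidate definition of -foo found by the analyzer while the other models
// the message conservatively, so that neither behavior's coverage is lost.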
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                               const MemRegion *, unsigned)
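// Tracks, along the current path, whether a foreign (cross-TU) call has
// already been bifurcated into an evaluation enqueued for the second (CTU)
// phase plus a conservative evaluation in the first phase; see
// ExprEngine::ctuBifurcate() below.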
REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)

void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
                              NodeBuilder &Bldr, ExplodedNode *Pred,
                              ProgramStateRef State) {
  ProgramStateRef ConservativeEvalState = nullptr;
  if (Call.isForeign() && !isSecondPhaseCTU()) {
    const auto IK = AMgr.options.getCTUPhase1Inlining();
    const bool DoInline = IK == CTUPhase1InliningKind::All ||
                          (IK == CTUPhase1InliningKind::Small &&
                           isSmall(AMgr.getAnalysisDeclContext(D)));
    if (DoInline) {
      inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
      return;
    }
    const bool BState = State->get<CTUDispatchBifurcation>();
    if (!BState) { // This is the first time we see this foreign function.
      // Enqueue it to be analyzed in the second (ctu) phase.
      inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
      // Conservatively evaluate in the first phase.
      ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
      conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
    } else {
      conservativeEvalCall(Call, Bldr, Pred, State);
    }
    return;
  }
  inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
}

void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
                            const Decl *D, NodeBuilder &Bldr,
                            ExplodedNode *Pred, ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                               currBldrCtx->blockCount(), currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      WList->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list, so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Do not mark as visited in the 2nd run (CTUWList), so the function will
  // be visited as top-level; this way we won't lose reports in non-ctu
  // mode. Consider the case when a function in a foreign TU calls back
  // into the main TU.
  // Note, during the 1st run, it doesn't matter if we mark the foreign
  // functions as visited (or not) because they can never appear as a top level
  // function in the main TU.
  if (!isSecondPhaseCTU())
    // Mark the decl as visited.
    if (VisitedCallees)
      VisitedCallees->insert(D);
}

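/// If the analyzer previously decided to backtrack and replay this call
/// without inlining (recorded via the ReplayWithoutInlining trait), return a
/// state with that marker removed so the call is evaluated conservatively
/// this time; otherwise return null.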
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
                                                       const CallEvent &Call) {
  const Expr *E = Call.getOriginExpr();
  // FIXME: Constructors to placement arguments of operator new
  // are not supported yet.
  if (!E || isa<CXXNewExpr>(E))
    return State;

  const LocationContext *LC = Call.getLocationContext();
  for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
    unsigned I = Call.getASTArgumentIndex(CallI);
    if (Optional<SVal> V =
            getObjectUnderConstruction(State, {E, I}, LC)) {
      SVal VV = *V;
      (void)VV;
      assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
                 ->getStackFrame()->getParent()
                 ->getStackFrame() == LC->getStackFrame());
      State = finishObjectConstruction(State, {E, I}, LC);
    }
  }

  return State;
}

void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
                                            ExplodedNode *Pred,
                                            const CallEvent &Call) {
  ProgramStateRef State = Pred->getState();
  ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
  if (CleanedState == State) {
    Dst.insert(Pred);
    return;
  }

  const Expr *E = Call.getOriginExpr();
  const LocationContext *LC = Call.getLocationContext();
  NodeBuilder B(Pred, Dst, *currBldrCtx);
  static SimpleProgramPointTag Tag("ExprEngine",
                                   "Finish argument construction");
  PreStmt PP(E, LC, &Tag);
  B.generateNode(PP, CleanedState, Pred);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
                                            Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this, EvalCallOptions());

  // If there were other constructors called for object-type arguments
  // of this call, clean them up.
  ExplodedNodeSet dstArgumentCleanup;
  for (ExplodedNode *I : dstCallEvaluated)
    finishArgumentConstruction(dstArgumentCleanup, I, Call);

  ExplodedNodeSet dstPostCall;
  getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
                                             Call, *this);

  // Symbols may have escaped (been conjured) while invalidating the regions
  // above. Note that, for inlined calls, the nodes were put back into the
  // worklist, so we can assume that every node here belongs to a
  // conservatively evaluated call at this point.

  // Run the pointerEscape callback with the newly conjured symbols.
  SmallVector<std::pair<SVal, SVal>, 8> Escaped;
  for (ExplodedNode *I : dstPostCall) {
    NodeBuilder B(I, Dst, *currBldrCtx);
    ProgramStateRef State = I->getState();
    Escaped.clear();
    {
      unsigned Arg = -1;
      for (const ParmVarDecl *PVD : Call.parameters()) {
        ++Arg;
        QualType ParamTy = PVD->getType();
        if (ParamTy.isNull() ||
            (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
          continue;
        QualType Pointee = ParamTy->getPointeeType();
        if (Pointee.isConstQualified() || Pointee->isVoidType())
          continue;
        if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
          Escaped.emplace_back(loc::MemRegionVal(MR),
                               State->getSVal(MR, Pointee));
      }
    }

    State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
                                        PSK_EscapeOutParameters, &Call);

    if (State == I->getState())
      Dst.insert(I);
    else
      B.generateNode(I->getLocation(), State, I);
  }
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        handleConstructionContext(Call.getOriginExpr(), State, LCtx,
                                  RTC->getConstructionContext(), CallOpts);
    const MemRegion *TargetR = Target.getAsRegion();
    assert(TargetR);
    // Invalidate the region so that it doesn't look uninitialized. If this is
    // a field or element constructor, we do not want to invalidate
    // the whole structure. Pointer escape is meaningless because
    // the structure is a product of conservative evaluation
    // and therefore contains nothing interesting at this point.
    RegionAndSymbolInvalidationTraits ITraits;
    ITraits.setTrait(TargetR,
        RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
    State = State->invalidateRegions(TargetR, E, Count, LCtx,
                                     /*CausesPointerEscape=*/false, nullptr,
                                     &Call, &ITraits);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    const auto *CNE = dyn_cast<CXXNewExpr>(E);
    if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
      R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
      const MemRegion *MR = R.getAsRegion()->StripCasts();

      // Store the extent of the allocated object(s).
      SVal ElementCount;
      if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
        ElementCount = State->getSVal(SizeExpr, LCtx);
      } else {
        ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
      }

      SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);

      SVal Size =
          svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
                                svalBuilder.getArrayIndexType());
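      // For example (a sketch): for 'new T[n]' this computes n * sizeof(T);
      // for a scalar 'new T', it is 1 * sizeof(T).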

      // FIXME: This line is to prevent a crash. For more details please check
      // issue #56264.
      if (Size.isUndef())
        Size = UnknownVal();

      State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
                               svalBuilder);
    } else {
      R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
    }
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.MayInlineCXXAllocator)
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor) {
      if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
        return CIP_DisallowedOnce;
    }

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the
    // constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.ShouldIncludeTemporaryDtorsInCFG)
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a
      // reference-type field within an aggregate, automatic destructors
      // don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXInheritedConstructor: {
    // This doesn't really increase the cost of inlining ever, because
    // the stack frame of the inherited constructor is trivial.
    return CIP_Allowed;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor &&
        !Opts.MayInlineCXXTemporaryDtors)
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXDeallocator:
    [[fallthrough]];
  case CE_CXXAllocator:
    if (Opts.MayInlineCXXAllocator)
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and
    // such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.MayInlineObjCMethod)
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
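///
/// For example, each of these sketches would be classified as a container or
/// iterator by this heuristic:
///   struct MyVec { int *begin(); };
///   struct MyIt { using iterator_category = std::forward_iterator_tag; };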
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.MayInlineTemplateFunctions)
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.MayInlineCXXStandardLibrary)
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.MayInlineCXXContainerMethods)
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++
      // shared_ptr. We don't currently do a good job modeling shared_ptr
      // because we can't see the reference count, so treating it as opaque
      // is probably the best idea.
      if (!Opts.MayInlineCXXSharedPtrDtor)
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline huge functions.
  if (isHuge(CalleeADC))
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline) {
    if (!MayInline.value())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline || *MayInline);
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  // Do not inline if recursive or if we've reached the max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      (!isSmall(CalleeADC) || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.MaxTimesInlineLarge) &&
      isLarge(CalleeADC)) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
    return false;

  return true;
}

bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
                                               const CXXConstructExpr *CE,
                                               const LocationContext *LCtx) {
  if (!CE)
    return false;

  auto Type = CE->getType();

  // FIXME: Handle other array types.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Type)) {
    unsigned Size = getContext().getConstantArrayElementCount(CAT);

    return Size <= AMgr.options.maxBlockVisitOnPath;
  }

  // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
  if (auto Size = getPendingInitLoop(State, CE, LCtx))
    return *Size <= AMgr.options.maxBlockVisitOnPath;

  return false;
}

bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
                                      const CXXConstructExpr *E,
                                      const LocationContext *LCtx) {

  if (!E)
    return false;

  auto Ty = E->getType();

  // FIXME: Handle non-constant array types.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) {
    unsigned Size = getContext().getConstantArrayElementCount(CAT);
    return Size > getIndexOfElementToConstruct(State, E, LCtx);
  }

  if (auto Size = getPendingInitLoop(State, E, LCtx))
    return Size > getIndexOfElementToConstruct(State, E, LCtx);

  return false;
}

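/// Returns true if the given call is a trivial copy or move assignment of an
/// object, e.g. the implicitly-defined 'S &operator=(const S &)' of a struct
/// with only scalar members. defaultEvalCall() models such calls with
/// performTrivialCopy() instead of inlining them.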
static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    Call->setForeign(RD.isForeign());
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }
      ctuBifurcate(*Call, D, Bldr, Pred, State);
      return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
      State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      ctuBifurcate(Call, D, Bldr, Pred, State);
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    else
      conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  ctuBifurcate(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                            DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}
