//===- Store.cpp - Interface for maps from Locations to Values ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the types Store and StoreManager.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <optional>

using namespace clang;
using namespace ento;

StoreManager::StoreManager(ProgramStateManager &stateMgr)
    : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
      MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}

BindResult StoreManager::enterStackFrame(Store OldStore, const CallEvent &Call,
                                         const StackFrameContext *LCtx) {
  BindResult Result{StoreRef(OldStore, *this), {}};

  SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
  Call.getInitialStackFrameContents(LCtx, InitialBindings);

  for (const auto &[Location, Val] : InitialBindings) {
    Store S = Result.ResultingStore.getStore();
    BindResult Curr = Bind(S, Location.castAs<Loc>(), Val);
    Result.ResultingStore = Curr.ResultingStore;
    llvm::append_range(Result.FailedToBindValues, Curr.FailedToBindValues);
  }

  return Result;
}
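
// E.g. (illustrative): entering `void f(int x)` for the call `f(42)` yields
// one initial binding from the ParamVarRegion of `x` to the SVal 42; each
// binding is applied in turn, threading the resulting store through and
// collecting any values that failed to bind.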

const ElementRegion *StoreManager::MakeElementRegion(const SubRegion *Base,
                                                     QualType EleTy,
                                                     uint64_t index) {
  NonLoc idx = svalBuilder.makeArrayIndex(index);
  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}

const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
                                                        QualType T) {
  NonLoc idx = svalBuilder.makeZeroArrayIndex();
  assert(!T.isNull());
  return MRMgr.getElementRegion(T, idx, R, Ctx);
}
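
// E.g. (illustrative notation): GetElementZeroRegion(R, T) wraps R as
// Element{T, 0, R}, modeling a cast that reinterprets R as the first element
// of an array of T.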

std::optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
                                                          QualType CastToTy) {
  ASTContext &Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved. Blocks can be cast to/from 'id', as they can be treated
    // as Objective-C objects. This could possibly be handled by enhancing
    // our reasoning about downcasts of symbolic objects.
    if (isa<CodeTextRegion, SymbolicRegion>(R))
      return R;

    // We don't know what to make of it. Return std::nullopt, which will be
    // interpreted as UnknownVal.
    return std::nullopt;
  }

  // Now assume we are casting from pointer to pointer. Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
  CanonPointeeTy = CanonPointeeTy.getLocalUnqualifiedType();

  // Handle casts to void*. We just pass the region through.
  if (CanonPointeeTy == Ctx.VoidTy)
    return R;

  const auto IsSameRegionType = [&Ctx](const MemRegion *R, QualType OtherTy) {
    if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
      if (OtherTy == ObjTy.getLocalUnqualifiedType())
        return true;
    }
    return false;
  };

  // Handle casts from compatible types.
  if (R->isBoundable() && IsSameRegionType(R, CanonPointeeTy))
    return R;

  // Process region cast according to the kind of the region being cast.
  switch (R->getKind()) {
  case MemRegion::CXXThisRegionKind:
  case MemRegion::CodeSpaceRegionKind:
  case MemRegion::StackLocalsSpaceRegionKind:
  case MemRegion::StackArgumentsSpaceRegionKind:
  case MemRegion::HeapSpaceRegionKind:
  case MemRegion::UnknownSpaceRegionKind:
  case MemRegion::StaticGlobalSpaceRegionKind:
  case MemRegion::GlobalInternalSpaceRegionKind:
  case MemRegion::GlobalSystemSpaceRegionKind:
  case MemRegion::GlobalImmutableSpaceRegionKind: {
    llvm_unreachable("Invalid region cast");
  }

  case MemRegion::FunctionCodeRegionKind:
  case MemRegion::BlockCodeRegionKind:
  case MemRegion::BlockDataRegionKind:
  case MemRegion::StringRegionKind:
    // FIXME: Need to handle arbitrary downcasts.
  case MemRegion::SymbolicRegionKind:
  case MemRegion::AllocaRegionKind:
  case MemRegion::CompoundLiteralRegionKind:
  case MemRegion::FieldRegionKind:
  case MemRegion::ObjCIvarRegionKind:
  case MemRegion::ObjCStringRegionKind:
  case MemRegion::NonParamVarRegionKind:
  case MemRegion::ParamVarRegionKind:
  case MemRegion::CXXTempObjectRegionKind:
  case MemRegion::CXXLifetimeExtendedObjectRegionKind:
  case MemRegion::CXXBaseObjectRegionKind:
  case MemRegion::CXXDerivedObjectRegionKind:
    return MakeElementRegion(cast<SubRegion>(R), PointeeTy);

  case MemRegion::ElementRegionKind: {
    // If we are casting from an ElementRegion to another type, the
    // algorithm is as follows:
    //
    // (1) Compute the "raw offset" of the ElementRegion from the
    //     base region. This is done by calling 'getAsRawOffset()'.
    //
    // (2a) If we get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', determine if the absolute offset
    //      can be exactly divided into chunks of the size of the
    //      casted-pointee type. If so, create a new ElementRegion with
    //      the pointee-cast type as the new ElementType and the index
    //      being the offset divided by the chunk size. If not, create
    //      a new ElementRegion at offset 0 off the raw offset region.
    //
    // (2b) If we don't get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', it means that we are at offset 0.
    //
    // FIXME: Handle symbolic raw offsets.

    const ElementRegion *elementR = cast<ElementRegion>(R);
    const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
    const MemRegion *baseR = rawOff.getRegion();

    // If we cannot compute a raw offset, give up and return std::nullopt.
    if (!baseR)
      return std::nullopt;

    CharUnits off = rawOff.getOffset();

    if (off.isZero()) {
      // Edge case: we are at 0 bytes off the beginning of baseR. We check to
      // see if the type we are casting to is the same as the type of the base
      // region. If so, just return the base region.
      if (IsSameRegionType(baseR, CanonPointeeTy))
        return baseR;
      // Otherwise, create a new ElementRegion at offset 0.
      return MakeElementRegion(cast<SubRegion>(baseR), PointeeTy);
    }

    // We have a non-zero offset from the base region. We want to determine
    // if the offset can be evenly divided by sizeof(PointeeTy). If so,
    // we create an ElementRegion whose index is that value. Otherwise, we
    // create two ElementRegions, one that reflects a raw offset and the other
    // that reflects the cast.
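    //
    // E.g. (illustrative, assuming a 4-byte int): if baseR is a char buffer
    // and off is 8 bytes, casting to 'int *' yields Element{int, 2, baseR};
    // if off is 6 bytes instead, 6 is not a multiple of 4, so the result is
    // Element{int, 0, Element{char, 6, baseR}}.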

    // Compute the index for the new ElementRegion.
    int64_t newIndex = 0;
    const MemRegion *newSuperR = nullptr;

    // We can only compute sizeof(PointeeTy) if it is a complete type.
    if (!PointeeTy->isIncompleteType()) {
      // Compute the size in **bytes**.
      CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
      if (!pointeeTySize.isZero()) {
        // Is the offset a multiple of the size? If so, we can layer the
        // ElementRegion (with elementType == PointeeTy) directly on top of
        // the base region.
        if (off % pointeeTySize == 0) {
          newIndex = off / pointeeTySize;
          newSuperR = baseR;
        }
      }
    }

    if (!newSuperR) {
      // Create an intermediate ElementRegion to represent the raw byte
      // offset. This will be the super region of the final ElementRegion.
      newSuperR = MakeElementRegion(cast<SubRegion>(baseR), Ctx.CharTy,
                                    off.getQuantity());
    }

    return MakeElementRegion(cast<SubRegion>(newSuperR), PointeeTy, newIndex);
  }
  }

  llvm_unreachable("unreachable");
}

static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return true;

  const auto *TVR = dyn_cast<TypedValueRegion>(MR);
  if (!TVR)
    return true;

  const CXXRecordDecl *RD = TVR->getValueType()->getAsCXXRecordDecl();
  if (!RD)
    return true;

  const CXXRecordDecl *Expected = Ty->getPointeeCXXRecordDecl();
  if (!Expected)
    Expected = Ty->getAsCXXRecordDecl();

  return Expected->getCanonicalDecl() == RD->getCanonicalDecl();
}

SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
  // Early return to avoid doing the wrong thing in the face of
  // reinterpret_cast.
  if (!regionMatchesCXXRecordType(Derived, Cast->getSubExpr()->getType()))
    return UnknownVal();

  // Walk through the cast path to create nested CXXBaseRegions.
  SVal Result = Derived;
  for (const CXXBaseSpecifier *Base : Cast->path()) {
    Result = evalDerivedToBase(Result, Base->getType(), Base->isVirtual());
  }
  return Result;
}
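
// E.g. (illustrative): for `struct B {}; struct M : B {}; struct D : M {};`,
// upcasting a D region along the cast path D -> M -> B produces the nested
// region Base{B, Base{M, DRegion}}, one CXXBaseObjectRegion per path step.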

SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
  // Walk through the path to create nested CXXBaseRegions.
  SVal Result = Derived;
  for (const auto &I : Path)
    Result = evalDerivedToBase(Result, I.Base->getType(), I.Base->isVirtual());
  return Result;
}

SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
                                     bool IsVirtual) {
  const MemRegion *DerivedReg = Derived.getAsRegion();
  if (!DerivedReg)
    return Derived;

  const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
  if (!BaseDecl)
    BaseDecl = BaseType->getAsCXXRecordDecl();
  assert(BaseDecl && "not a C++ object?");

  if (const auto *AlreadyDerivedReg =
          dyn_cast<CXXDerivedObjectRegion>(DerivedReg)) {
    if (const auto *SR =
            dyn_cast<SymbolicRegion>(AlreadyDerivedReg->getSuperRegion()))
      if (SR->getSymbol()->getType()->getPointeeCXXRecordDecl() == BaseDecl)
        return loc::MemRegionVal(SR);

    DerivedReg = AlreadyDerivedReg->getSuperRegion();
  }

  const MemRegion *BaseReg = MRMgr.getCXXBaseObjectRegion(
      BaseDecl, cast<SubRegion>(DerivedReg), IsVirtual);

  return loc::MemRegionVal(BaseReg);
}
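
// E.g. (illustrative): upcasting Derived{D, $sym} back to B, where $sym has
// type 'B *', short-circuits to the symbolic region $sym itself rather than
// stacking Base{B, Derived{D, $sym}} on top of it.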

/// Returns the static type of the given region, if it represents a C++ class
/// object.
///
/// This handles both fully-typed regions, where the dynamic type is known, and
/// symbolic regions, where the dynamic type is merely bounded (and even then,
/// only ostensibly!), but does not take advantage of any dynamic type info.
static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
  if (const auto *TVR = dyn_cast<TypedValueRegion>(MR))
    return TVR->getValueType()->getAsCXXRecordDecl();
  if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
    return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
  return nullptr;
}

std::optional<SVal> StoreManager::evalBaseToDerived(SVal Base,
                                                    QualType TargetType) {
  const MemRegion *MR = Base.getAsRegion();
  if (!MR)
    return UnknownVal();

  // Assume the derived class is a pointer or a reference to a CXX record.
  TargetType = TargetType->getPointeeType();
  assert(!TargetType.isNull());
  const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
  if (!TargetClass && !TargetType->isVoidType())
    return UnknownVal();

  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
  // derived to base).
  while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
    // If we found the derived class, the cast succeeds.
    if (MRClass == TargetClass)
      return loc::MemRegionVal(MR);

    // We skip over incomplete types. They must be the result of an earlier
    // reinterpret_cast, as one can only dynamic_cast between types in the same
    // class hierarchy.
    if (!TargetType->isVoidType() && MRClass->hasDefinition()) {
      // Static upcasts are marked as DerivedToBase casts by Sema, so this will
      // only happen when multiple or virtual inheritance is involved.
      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
                         /*DetectVirtual=*/false);
      if (MRClass->isDerivedFrom(TargetClass, Paths))
        return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
    }

    if (const auto *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
      // Drill down the chain to get the derived classes.
      MR = BaseR->getSuperRegion();
      continue;
    }

    // If this is a cast to void*, return the region.
    if (TargetType->isVoidType())
      return loc::MemRegionVal(MR);

    // Strange use of reinterpret_cast can give us paths we don't reason
    // about well, by putting in ElementRegions where we'd expect
    // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
    // derived class has a zero offset from the base class), then it's safe
    // to strip the cast; if it's invalid, -Wreinterpret-base-class should
    // catch it. In the interest of performance, the analyzer will silently
    // do the wrong thing in the invalid case (because offsets for subregions
    // will be wrong).
    const MemRegion *Uncasted =
        MR->StripCasts(/*StripBaseAndDerivedCasts=*/false);
    if (Uncasted == MR) {
      // We reached the bottom of the hierarchy and did not find the derived
      // class. We must be casting the base to derived, so the cast should
      // fail.
      break;
    }

    MR = Uncasted;
  }

  // If we're casting a symbolic base pointer to a derived class, use
  // CXXDerivedObjectRegion to represent the cast. If it's a pointer to an
  // unrelated type, it must be a weird reinterpret_cast and we have to
  // be fine with ElementRegion. TODO: Should we instead make
  // Derived{TargetClass, Element{SourceClass, SR}}?
  if (const auto *SR = dyn_cast<SymbolicRegion>(MR)) {
    QualType T = SR->getSymbol()->getType();
    const CXXRecordDecl *SourceClass = T->getPointeeCXXRecordDecl();
    if (TargetClass && SourceClass && TargetClass->isDerivedFrom(SourceClass))
      return loc::MemRegionVal(
          MRMgr.getCXXDerivedObjectRegion(TargetClass, SR));
    return loc::MemRegionVal(GetElementZeroRegion(SR, TargetType));
  }

  // We failed if the region we ended up with has perfect type info.
  if (isa<TypedValueRegion>(MR))
    return std::nullopt;

  return UnknownVal();
}
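
// E.g. (illustrative): a dynamic_cast to 'D *' applied to Base{B, DRegion}
// peels the CXXBaseObjectRegion and succeeds with DRegion; applied to a plain
// typed B region it fails (std::nullopt); applied to a symbolic B region it
// is modeled as Derived{D, $sym}.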

SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
  if (Base.isUnknownOrUndef())
    return Base;

  Loc BaseL = Base.castAs<Loc>();
  const SubRegion *BaseR = nullptr;

  switch (BaseL.getKind()) {
  case loc::MemRegionValKind:
    BaseR = cast<SubRegion>(BaseL.castAs<loc::MemRegionVal>().getRegion());
    break;

  case loc::GotoLabelKind:
    // These are abnormal cases. Flag an undefined value.
    return UndefinedVal();

  case loc::ConcreteIntKind:
    // While these seem funny, this can happen through casts.
    // FIXME: What we should return is the field offset, not base. For
    // example, add the field offset to the integer value. That way things
    // like this work properly:  &(((struct foo *) 0xa)->f)
    // However, that's not easy to fix without reducing our ability
    // to catch null pointer dereferences. E.g., ((struct foo *)0x0)->f = 7
    // is a null dereference even though we're dereferencing the offset of f
    // rather than null. Coming up with an approach that computes offsets
    // over null pointers properly while still being able to catch null
    // dereferences might be worth it.
    return Base;

  default:
    llvm_unreachable("Unhandled Base.");
  }

  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
  // of FieldDecl.
  if (const auto *ID = dyn_cast<ObjCIvarDecl>(D))
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));

  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}

SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
  return getLValueFieldOrIvar(decl, base);
}

SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
                                    SVal Base) {

  // Special case: if the index is 0, return the base as if this were not an
  // array dereference.
  if (Offset.isZeroConstant()) {
    QualType BT = Base.getType(this->Ctx);
    if (!BT.isNull() && !elementType.isNull()) {
      QualType PointeeTy = BT->getPointeeType();
      if (!PointeeTy.isNull() &&
          PointeeTy.getCanonicalType() == elementType.getCanonicalType())
        return Base;
    }
  }
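  // E.g. (illustrative): for `int *p`, the lvalue of `p[0]` is just the
  // location `p` points to, since the pointee type already matches
  // elementType.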

  // If the base is an unknown or undefined value, just return it back.
  // FIXME: For absolute pointer addresses, we just return that value back as
  // well, although in reality we should return the offset added to that
  // value. See also the similar FIXME in getLValueFieldOrIvar().
  if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
    return Base;

  if (isa<loc::GotoLabel>(Base))
    return UnknownVal();

  const SubRegion *BaseRegion =
      Base.castAs<loc::MemRegionVal>().getRegionAs<SubRegion>();

  // Pointers of any type can be cast and used as array bases.
  const auto *ElemR = dyn_cast<ElementRegion>(BaseRegion);

  // Convert the offset to the appropriate size and signedness.
  auto Off = svalBuilder.convertToArrayIndex(Offset).getAs<NonLoc>();
  if (!Off) {
    // Handle cases where a LazyCompoundVal is used as an array index.
    // This can happen if the code does something like:
    //   char b[4];
    //   a[__builtin_bit_cast(int, b)];
    // Return UnknownVal, since we cannot model it.
    return UnknownVal();
  }

  Offset = Off.value();

  if (!ElemR) {
    // If the base region is not an ElementRegion, create one.
    // This can happen in the following example:
    //
    //   char *p = __builtin_alloca(10);
    //   p[1] = 8;
    //
    // Observe that 'p' binds to an AllocaRegion.
    return loc::MemRegionVal(
        MRMgr.getElementRegion(elementType, Offset, BaseRegion, Ctx));
  }

  SVal BaseIdx = ElemR->getIndex();

  if (!isa<nonloc::ConcreteInt>(BaseIdx))
    return UnknownVal();

  const llvm::APSInt &BaseIdxI =
      BaseIdx.castAs<nonloc::ConcreteInt>().getValue();

  // Only allow non-integer offsets if the base region has no offset itself.
  // FIXME: This is a somewhat arbitrary restriction. We should be using
  // SValBuilder here to add the two offsets without checking their types.
  if (!isa<nonloc::ConcreteInt>(Offset))
    return loc::MemRegionVal(MRMgr.getElementRegion(
        elementType, Offset, cast<SubRegion>(ElemR->getSuperRegion()), Ctx));

  const llvm::APSInt &OffI = Offset.castAs<nonloc::ConcreteInt>().getValue();
  assert(BaseIdxI.isSigned());

  // Compute the new index.
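  // E.g. (illustrative): indexing Element{char, 2, buf} with a concrete
  // offset of 3 folds into Element{char, 5, buf} rather than layering one
  // ElementRegion on top of another.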
  nonloc::ConcreteInt NewIdx(
      svalBuilder.getBasicValueFactory().getValue(BaseIdxI + OffI));

  // Construct the new ElementRegion.
  const SubRegion *ArrayR = cast<SubRegion>(ElemR->getSuperRegion());
  return loc::MemRegionVal(
      MRMgr.getElementRegion(elementType, NewIdx, ArrayR, Ctx));
}

StoreManager::BindingsHandler::~BindingsHandler() = default;

bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager &SMgr,
                                                    Store store,
                                                    const MemRegion *R,
                                                    SVal val) {
  SymbolRef SymV = val.getAsLocSymbol();
  if (!SymV || SymV != Sym)
    return true;

  if (Binding) {
    First = false;
    return false;
  }

  Binding = R;
  return true;
}