//== MemoryTaggingSupport.cpp - helpers for memory tagging implementations ===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines common infrastructure for HWAddressSanitizer and
// AArch64StackTagging.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"

namespace llvm {
namespace memtag {
namespace {
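// Conservatively checks whether any instruction in Insts may be reachable
// from any other. Gives up and returns true when Insts has more than
// MaxLifetimes elements, since the pairwise check below is quadratic.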
bool maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
                                 const DominatorTree *DT, const LoopInfo *LI,
                                 size_t MaxLifetimes) {
  // If we have too many lifetime ends, give up, as the algorithm below is N^2.
  if (Insts.size() > MaxLifetimes)
    return true;
  for (size_t I = 0; I < Insts.size(); ++I) {
    for (size_t J = 0; J < Insts.size(); ++J) {
      if (I == J)
        continue;
      if (isPotentiallyReachable(Insts[I], Insts[J], nullptr, DT, LI))
        return true;
    }
  }
  return false;
}
} // namespace

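// Invokes Callback at every point where the object must be untagged: on the
// lifetime ends when they cover all function exits reachable from Start, and
// directly on the reachable exits otherwise. Returns false in the latter
// case, i.e. when the untag may have been placed outside the lifetime
// interval and the caller should drop the lifetime end markers.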
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
                          const LoopInfo &LI, const Instruction *Start,
                          const SmallVectorImpl<IntrinsicInst *> &Ends,
                          const SmallVectorImpl<Instruction *> &RetVec,
                          llvm::function_ref<void(Instruction *)> Callback) {
  if (Ends.size() == 1 && PDT.dominates(Ends[0], Start)) {
    Callback(Ends[0]);
    return true;
  }
  SmallPtrSet<BasicBlock *, 2> EndBlocks;
  for (auto *End : Ends) {
    EndBlocks.insert(End->getParent());
  }
  SmallVector<Instruction *, 8> ReachableRetVec;
  unsigned NumCoveredExits = 0;
  for (auto *RI : RetVec) {
    if (!isPotentiallyReachable(Start, RI, nullptr, &DT, &LI))
      continue;
    ReachableRetVec.push_back(RI);
    // If there is an end in the same basic block as the return, we know for
    // sure that the return is covered. Otherwise, we can check whether there
    // is a way to reach the RI from the start of the lifetime without passing
    // through an end.
    if (EndBlocks.contains(RI->getParent()) ||
        !isPotentiallyReachable(Start, RI, &EndBlocks, &DT, &LI)) {
      ++NumCoveredExits;
    }
  }
  // If all reachable exits are covered by lifetime ends, untag at the ends.
  // If there's a mix of covered and non-covered exits, put the untag on the
  // exits instead, so we avoid the redundancy of untagging twice.
  if (NumCoveredExits == ReachableRetVec.size()) {
    for (auto *End : Ends)
      Callback(End);
  } else {
    for (auto *RI : ReachableRetVec)
      Callback(RI);
    // We may have inserted untag outside of the lifetime interval.
    // Signal the caller to remove the lifetime end call for this alloca.
    return false;
  }
  return true;
}

bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
                        const SmallVectorImpl<IntrinsicInst *> &LifetimeEnd,
                        const DominatorTree *DT, const LoopInfo *LI,
                        size_t MaxLifetimes) {
  // A standard lifetime has exactly one start and one end in every possible
  // execution. If it has multiple ends, they have to be unreachable from each
  // other, so at most one of them is actually used for each execution of the
  // function.
  return LifetimeStart.size() == 1 &&
         (LifetimeEnd.size() == 1 ||
          (LifetimeEnd.size() > 0 &&
           !maybeReachableFromEachOther(LifetimeEnd, DT, LI, MaxLifetimes)));
}

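// If Inst exits the function, returns the instruction at which to untag: for
// a return preceded by a musttail call, that is the call itself (nothing may
// be inserted between a musttail call and its return); otherwise it is the
// exit instruction. Returns nullptr for non-exit instructions.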
Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
  if (isa<ReturnInst>(Inst)) {
    if (CallInst *CI = Inst.getParent()->getTerminatingMustTailCall())
      return CI;
    return &Inst;
  }
  if (isa<ResumeInst, CleanupReturnInst>(Inst)) {
    return &Inst;
  }
  return nullptr;
}

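// Accumulates everything StackInfo needs for one instruction: debug records
// and debug intrinsics referring to interesting allocas, returns-twice calls,
// interesting allocas themselves, their lifetime markers, and function exits
// that may need untagging.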
void StackInfoBuilder::visit(Instruction &Inst) {
  // Visit non-intrinsic debug-info records attached to Inst.
  for (auto &DPV : Inst.getDbgValueRange()) {
    auto AddIfInteresting = [&](Value *V) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!isInterestingAlloca(*AI))
          return;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DPVVec = AInfo.DbgVariableRecords;
        if (DPVVec.empty() || DPVVec.back() != &DPV)
          DPVVec.push_back(&DPV);
      }
    };

    for_each(DPV.location_ops(), AddIfInteresting);
    if (DPV.isDbgAssign())
      AddIfInteresting(DPV.getAddress());
  }

  if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
    if (CI->canReturnTwice()) {
      Info.CallsReturnTwice = true;
    }
  }
  if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
    if (isInterestingAlloca(*AI)) {
      Info.AllocasToInstrument[AI].AI = AI;
    }
    return;
  }
  auto *II = dyn_cast<IntrinsicInst>(&Inst);
  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
             II->getIntrinsicID() == Intrinsic::lifetime_end)) {
    AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
    if (!AI) {
      Info.UnrecognizedLifetimes.push_back(&Inst);
      return;
    }
    if (!isInterestingAlloca(*AI))
      return;
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      Info.AllocasToInstrument[AI].LifetimeStart.push_back(II);
    else
      Info.AllocasToInstrument[AI].LifetimeEnd.push_back(II);
    return;
  }
  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
    auto AddIfInteresting = [&](Value *V) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!isInterestingAlloca(*AI))
          return;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DVIVec = AInfo.DbgVariableIntrinsics;
        if (DVIVec.empty() || DVIVec.back() != DVI)
          DVIVec.push_back(DVI);
      }
    };
    for_each(DVI->location_ops(), AddIfInteresting);
    if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DVI))
      AddIfInteresting(DAI->getAddress());
  }

  Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
  if (ExitUntag)
    Info.RetVec.push_back(ExitUntag);
}

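// Returns true if the alloca should be instrumented: a sized, static,
// non-zero-size alloca that is not promotable to a register, not used with
// inalloca or swifterror, and not proven safe by stack safety analysis.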
bool StackInfoBuilder::isInterestingAlloca(const AllocaInst &AI) {
  return (AI.getAllocatedType()->isSized() &&
          // FIXME: instrument dynamic allocas, too
          AI.isStaticAlloca() &&
          // alloca() may be called with 0 size, ignore it.
          memtag::getAllocaSizeInBytes(AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          !isAllocaPromotable(&AI) &&
          // inalloca allocas are not treated as static, and we don't want
          // dynamic alloca instrumentation for them either.
          !AI.isUsedWithInAlloca() &&
          // swifterror allocas are register promoted by ISel
          !AI.isSwiftError()) &&
         // safe allocas are not interesting
         !(SSI && SSI->isSafe(AI));
}

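// Returns the alloca's size in bytes. The dereference asserts that the
// allocation size is known, which holds for the static allocas this file
// instruments.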
uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  const DataLayout &DL = AI.getModule()->getDataLayout();
  return *AI.getAllocationSize(DL);
}

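// Raises the alloca's alignment to at least Alignment and, when its size is
// not already a multiple of Alignment, replaces it with a copy padded up to
// the aligned size, presumably so that tagging can cover the object in whole
// tag granules.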
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Alignment) {
  const Align NewAlignment = std::max(Info.AI->getAlign(), Alignment);
  Info.AI->setAlignment(NewAlignment);
  auto &Ctx = Info.AI->getFunction()->getContext();

  uint64_t Size = getAllocaSizeInBytes(*Info.AI);
  uint64_t AlignedSize = alignTo(Size, Alignment);
  if (Size == AlignedSize)
    return;

  // Add padding to the alloca.
  Type *AllocatedType =
      Info.AI->isArrayAllocation()
          ? ArrayType::get(
                Info.AI->getAllocatedType(),
                cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
          : Info.AI->getAllocatedType();
  Type *PaddingType = ArrayType::get(Type::getInt8Ty(Ctx), AlignedSize - Size);
  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
  auto *NewAI = new AllocaInst(TypeWithPadding, Info.AI->getAddressSpace(),
                               nullptr, "", Info.AI);
  NewAI->takeName(Info.AI);
  NewAI->setAlignment(Info.AI->getAlign());
  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
  NewAI->setSwiftError(Info.AI->isSwiftError());
  NewAI->copyMetadata(*Info.AI);

  Value *NewPtr = NewAI;

  // TODO: Remove once typed pointers are dropped.
  if (Info.AI->getType() != NewAI->getType())
    NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);

  Info.AI->replaceAllUsesWith(NewPtr);
  Info.AI->eraseFromParent();
  Info.AI = NewAI;
}

} // namespace memtag
} // namespace llvm