//===-- AMDGPULowerKernelArguments.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
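///
/// For example (an illustrative sketch; exact offsets, names, and metadata
/// depend on the target and the argument layout), a direct use of an i32
/// kernel argument %x is rewritten into a load through the kernarg segment
/// pointer:
///
///   %seg = call ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
///   %x.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4) %seg, i64 0
///   %x.load = load i32, ptr addrspace(4) %x.kernarg.offset, align 16, !invariant.load !0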
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class PreloadKernelArgInfo {
private:
  Function &F;
  const GCNSubtarget &ST;
  unsigned NumFreeUserSGPRs;

public:
  SmallVector<llvm::Metadata *, 8> KernelArgMetadata;

  PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
    setInitialFreeUserSGPRsCount();
  }

  // Set NumFreeUserSGPRs to the number of user SGPRs still available for
  // preloading arguments, i.e. the target's maximum minus those already in
  // use.
  void setInitialFreeUserSGPRsCount() {
    const unsigned MaxUserSGPRs = ST.getMaxNumUserSGPRs();
    GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);

    NumFreeUserSGPRs = MaxUserSGPRs - UserSGPRInfo.getNumUsedUserSGPRs();
  }

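  // Try to reserve user SGPRs for preloading an argument of AllocSize bytes
  // located at ArgOffset.  A sub-dword argument at an offset that is not
  // 4-byte aligned packs into the SGPR already reserved for the previous
  // argument, so it costs nothing extra.  Otherwise one SGPR is needed per
  // dword of the argument plus one per dword of alignment padding since the
  // end of the previous argument.  For example (illustrative values), an i64
  // at ArgOffset 8 with LastExplicitArgOffset 4 needs one padding SGPR plus
  // two preload SGPRs, so three user SGPRs must be free.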
  bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
                            uint64_t LastExplicitArgOffset) {
    // Check if this argument may be loaded into the same register as the
    // previous argument.
    if (!isAligned(Align(4), ArgOffset) && AllocSize < 4)
      return true;

    // Pad SGPRs for kernarg alignment.
    unsigned Padding = ArgOffset - LastExplicitArgOffset;
    unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
    unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
    if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
      return false;

    NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
    return true;
  }
};

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Skip over static allocas at the start of the entry block to find the
// insertion point for the kernarg loads.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded kernargs,
    // so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

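  // The kernarg segment pointer is never null and the whole segment is known
  // to be dereferenceable, so record that on the intrinsic's return value.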
  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  uint64_t ExplicitArgOffset = 0;
  // Preloaded kernel arguments must be sequential.
  bool InPreloadSequence = true;
  PreloadKernelArgInfo PreloadInfo(F, ST);

  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    uint64_t LastExplicitArgOffset = ExplicitArgOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
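    // EltOffset is this argument's ABI-aligned offset from the kernarg base
    // pointer (including the target's explicit kernarg offset, BaseOffset);
    // ExplicitArgOffset tracks the running end of the explicit argument block
    // and excludes BaseOffset.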

    // Try to preload this argument into user SGPRs.
    if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
        !Arg.getType()->isAggregateType())
      if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
                                           LastExplicitArgOffset))
        continue;

    InPreloadSequence = false;

    if (Arg.use_empty())
      continue;

    // If this is a byref argument, the loads are already explicit in the
    // function; we only need to rewrite the pointer values to point into the
    // kernarg segment.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      // TODO: Update this for GFX12 which does have scalar sub-dword loads.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
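      //
      // For example (illustrative offsets): an i16 argument at EltOffset 6 is
      // loaded as an i32 from AlignDownOffset 4; OffsetDiff is 2, so the
      // value is recovered later with a lshr by 16 and a trunc to i16.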
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
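      // (e.g. a <3 x i32> argument is loaded as <4 x i32> and narrowed back
      // to three elements with the shufflevector created below).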
      AdjustedArgTy = V4Ty;
    }

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

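    // Carry pointer-argument attributes over to the load as metadata so the
    // information survives once the original argument is replaced.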
    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable,
            MDNode::get(Ctx,
                        MDB.createConstant(
                            ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable_or_null,
            MDNode::get(Ctx,
                        MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                            DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

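  // Annotate the kernarg segment pointer with the strongest alignment known:
  // at least the 16-byte base alignment, raised to the largest argument
  // alignment seen.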
  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  return lowerKernelArguments(F, TM);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}

PreservedAnalyses
AMDGPULowerKernelArgumentsPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = lowerKernelArguments(F, TM);
  if (Changed) {
    // TODO: Preserves a lot more.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }

  return PreservedAnalyses::all();
}