//===-- AMDGPUAtomicOptimizer.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass optimizes atomic operations by using a single lane of a wavefront
/// to perform the atomic operation, thus reducing contention on that memory
/// location.
/// The atomic optimizer uses the following strategies to compute the scan and
/// reduced values:
/// 1. DPP -
///    This is the most efficient implementation for scan. DPP uses Whole Wave
///    Mode (WWM).
/// 2. Iterative -
///    An alternative implementation that iterates over all active lanes of the
///    wavefront using llvm.cttz and performs the scan using the readlane and
///    writelane intrinsics.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define DEBUG_TYPE "amdgpu-atomic-optimizer"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

struct ReplacementInfo {
  Instruction *I;
  AtomicRMWInst::BinOp Op;
  unsigned ValIdx;
  bool ValDivergent;
};

class AMDGPUAtomicOptimizer : public FunctionPass {
public:
  static char ID;
  ScanOptions ScanImpl;
  AMDGPUAtomicOptimizer(ScanOptions ScanImpl)
      : FunctionPass(ID), ScanImpl(ScanImpl) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<UniformityInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }
};

class AMDGPUAtomicOptimizerImpl
    : public InstVisitor<AMDGPUAtomicOptimizerImpl> {
private:
  SmallVector<ReplacementInfo, 8> ToReplace;
  const UniformityInfo *UA;
  const DataLayout *DL;
  DomTreeUpdater &DTU;
  const GCNSubtarget *ST;
  bool IsPixelShader;
  ScanOptions ScanImpl;

  Value *buildReduction(IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V,
                        Value *const Identity) const;
  Value *buildScan(IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V,
                   Value *const Identity) const;
  Value *buildShiftRight(IRBuilder<> &B, Value *V, Value *const Identity) const;

  std::pair<Value *, Value *>
  buildScanIteratively(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                       Value *const Identity, Value *V, Instruction &I,
                       BasicBlock *ComputeLoop, BasicBlock *ComputeEnd) const;

  void optimizeAtomic(Instruction &I, AtomicRMWInst::BinOp Op, unsigned ValIdx,
                      bool ValDivergent) const;

public:
  AMDGPUAtomicOptimizerImpl() = delete;

  AMDGPUAtomicOptimizerImpl(const UniformityInfo *UA, const DataLayout *DL,
                            DomTreeUpdater &DTU, const GCNSubtarget *ST,
                            bool IsPixelShader, ScanOptions ScanImpl)
      : UA(UA), DL(DL), DTU(DTU), ST(ST), IsPixelShader(IsPixelShader),
        ScanImpl(ScanImpl) {}

  bool run(Function &F);

  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitIntrinsicInst(IntrinsicInst &I);
};

} // namespace

char AMDGPUAtomicOptimizer::ID = 0;

char &llvm::AMDGPUAtomicOptimizerID = AMDGPUAtomicOptimizer::ID;

bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
  if (skipFunction(F)) {
    return false;
  }

  const UniformityInfo *UA =
      &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
  const DataLayout *DL = &F.getParent()->getDataLayout();

  DominatorTreeWrapperPass *const DTW =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DomTreeUpdater DTU(DTW ? &DTW->getDomTree() : nullptr,
                     DomTreeUpdater::UpdateStrategy::Lazy);

  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget *ST = &TM.getSubtarget<GCNSubtarget>(F);

  bool IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;

  return AMDGPUAtomicOptimizerImpl(UA, DL, DTU, ST, IsPixelShader, ScanImpl)
      .run(F);
}

PreservedAnalyses AMDGPUAtomicOptimizerPass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {

  const auto *UA = &AM.getResult<UniformityInfoAnalysis>(F);
  const DataLayout *DL = &F.getParent()->getDataLayout();

  DomTreeUpdater DTU(&AM.getResult<DominatorTreeAnalysis>(F),
                     DomTreeUpdater::UpdateStrategy::Lazy);
  const GCNSubtarget *ST = &TM.getSubtarget<GCNSubtarget>(F);

  bool IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;

  bool IsChanged =
      AMDGPUAtomicOptimizerImpl(UA, DL, DTU, ST, IsPixelShader, ScanImpl)
          .run(F);

  if (!IsChanged) {
    return PreservedAnalyses::all();
  }

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

bool AMDGPUAtomicOptimizerImpl::run(Function &F) {

  // Scan option None disables the Pass
  if (ScanImpl == ScanOptions::None) {
    return false;
  }

  visit(F);

  const bool Changed = !ToReplace.empty();

  for (ReplacementInfo &Info : ToReplace) {
    optimizeAtomic(*Info.I, Info.Op, Info.ValIdx, Info.ValDivergent);
  }

  ToReplace.clear();

  return Changed;
}

void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
  // Early exit for unhandled address space atomic instructions.
  switch (I.getPointerAddressSpace()) {
  default:
    return;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::LOCAL_ADDRESS:
    break;
  }

  AtomicRMWInst::BinOp Op = I.getOperation();

  switch (Op) {
  default:
    return;
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
    break;
  }

  // Only 32 and 64 bit floating point atomic ops are supported.
  if (AtomicRMWInst::isFPOperation(Op) &&
      !(I.getType()->isFloatTy() || I.getType()->isDoubleTy())) {
    return;
  }

  const unsigned PtrIdx = 0;
  const unsigned ValIdx = 1;

  // If the pointer operand is divergent, then each lane is doing an atomic
  // operation on a different address, and we cannot optimize that.
  if (UA->isDivergentUse(I.getOperandUse(PtrIdx))) {
    return;
  }

  const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent &&
      (!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
    return;
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

void AMDGPUAtomicOptimizerImpl::visitIntrinsicInst(IntrinsicInst &I) {
  AtomicRMWInst::BinOp Op;

  switch (I.getIntrinsicID()) {
  default:
    return;
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_add:
    Op = AtomicRMWInst::Add;
    break;
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_sub:
    Op = AtomicRMWInst::Sub;
    break;
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_and:
    Op = AtomicRMWInst::And;
    break;
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_or:
    Op = AtomicRMWInst::Or;
    break;
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_xor:
    Op = AtomicRMWInst::Xor;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_smin:
    Op = AtomicRMWInst::Min;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_umin:
    Op = AtomicRMWInst::UMin;
    break;
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_smax:
    Op = AtomicRMWInst::Max;
    break;
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_ptr_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_ptr_buffer_atomic_umax:
    Op = AtomicRMWInst::UMax;
    break;
  }

  const unsigned ValIdx = 0;

  const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent &&
      (!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
    return;
  }

  // If any of the other arguments to the intrinsic are divergent, we can't
  // optimize the operation.
  for (unsigned Idx = 1; Idx < I.getNumOperands(); Idx++) {
    if (UA->isDivergentUse(I.getOperandUse(Idx))) {
      return;
    }
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}

// Use the builder to create the non-atomic counterpart of the specified
// atomicrmw binary op.
static Value *buildNonAtomicBinOp(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                                  Value *LHS, Value *RHS) {
  CmpInst::Predicate Pred;

  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
    return B.CreateBinOp(Instruction::Add, LHS, RHS);
  case AtomicRMWInst::FAdd:
    return B.CreateFAdd(LHS, RHS);
  case AtomicRMWInst::Sub:
    return B.CreateBinOp(Instruction::Sub, LHS, RHS);
  case AtomicRMWInst::FSub:
    return B.CreateFSub(LHS, RHS);
  case AtomicRMWInst::And:
    return B.CreateBinOp(Instruction::And, LHS, RHS);
  case AtomicRMWInst::Or:
    return B.CreateBinOp(Instruction::Or, LHS, RHS);
  case AtomicRMWInst::Xor:
    return B.CreateBinOp(Instruction::Xor, LHS, RHS);

  case AtomicRMWInst::Max:
    Pred = CmpInst::ICMP_SGT;
    break;
  case AtomicRMWInst::Min:
    Pred = CmpInst::ICMP_SLT;
    break;
  case AtomicRMWInst::UMax:
    Pred = CmpInst::ICMP_UGT;
    break;
  case AtomicRMWInst::UMin:
    Pred = CmpInst::ICMP_ULT;
    break;
  case AtomicRMWInst::FMax:
    return B.CreateMaxNum(LHS, RHS);
  case AtomicRMWInst::FMin:
    return B.CreateMinNum(LHS, RHS);
  }
  Value *Cond = B.CreateICmp(Pred, LHS, RHS);
  return B.CreateSelect(Cond, LHS, RHS);
}

// Use the builder to create a reduction of V across the wavefront, with all
// lanes active, returning the same result in all lanes.
Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
                                                 AtomicRMWInst::BinOp Op,
                                                 Value *V,
                                                 Value *const Identity) const {
  Type *AtomicTy = V->getType();
  Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());
  Module *M = B.GetInsertBlock()->getModule();
  Function *UpdateDPP =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);

  // Reduce within each row of 16 lanes.
  for (unsigned Idx = 0; Idx < 4; Idx++) {
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::ROW_XMASK0 | 1 << Idx),
                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
  }
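  // ROW_XMASK0 | (1 << Idx) makes each lane combine with the lane whose ID
  // differs from its own in bit Idx of the row, so the four steps (xmask 1,
  // 2, 4, 8) form a butterfly reduction: afterwards every lane of a 16-lane
  // row holds the reduction of that whole row.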

  // Reduce within each pair of rows (i.e. 32 lanes).
  assert(ST->hasPermLaneX16());
  V = B.CreateBitCast(V, IntNTy);
  Value *Permlanex16Call = B.CreateIntrinsic(
      Intrinsic::amdgcn_permlanex16, {},
      {V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});
  V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
                          B.CreateBitCast(Permlanex16Call, AtomicTy));
  if (ST->isWave32()) {
    return V;
  }

  if (ST->hasPermLane64()) {
    // Reduce across the upper and lower 32 lanes.
    V = B.CreateBitCast(V, IntNTy);
    Value *Permlane64Call =
        B.CreateIntrinsic(Intrinsic::amdgcn_permlane64, {}, V);
    return buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
                               B.CreateBitCast(Permlane64Call, AtomicTy));
  }

  // Pick an arbitrary lane from 0..31 and an arbitrary lane from 32..63 and
  // combine them with a scalar operation.
  Function *ReadLane =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});
  V = B.CreateBitCast(V, IntNTy);
  Value *Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
  Value *Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
  return buildNonAtomicBinOp(B, Op, B.CreateBitCast(Lane0, AtomicTy),
                             B.CreateBitCast(Lane32, AtomicTy));
}

// Use the builder to create an inclusive scan of V across the wavefront, with
// all lanes active.
Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
                                            AtomicRMWInst::BinOp Op, Value *V,
                                            Value *Identity) const {
  Type *AtomicTy = V->getType();
  Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());

  Module *M = B.GetInsertBlock()->getModule();
  Function *UpdateDPP =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);

  for (unsigned Idx = 0; Idx < 4; Idx++) {
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::ROW_SHR0 | 1 << Idx),
                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
  }
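  // With ROW_SHR0 | (1 << Idx) each lane reads the value 1, 2, 4 and then 8
  // lanes below it in the row (lanes with no source keep Identity), so the
  // loop performs a Hillis-Steele style inclusive prefix scan of each
  // 16-lane row.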
  if (ST->hasDPPBroadcasts()) {
    // GFX9 has DPP row broadcast operations.
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::BCAST15), B.getInt32(0xa),
                      B.getInt32(0xf), B.getFalse()}));
    V = buildNonAtomicBinOp(
        B, Op, V,
        B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::BCAST31), B.getInt32(0xc),
                      B.getInt32(0xf), B.getFalse()}));
  } else {
    // On GFX10 all DPP operations are confined to a single row. To get cross-
    // row operations we have to use permlane or readlane.

    // Combine lane 15 into lanes 16..31 (and, for wave 64, lane 47 into lanes
    // 48..63).
    assert(ST->hasPermLaneX16());
    V = B.CreateBitCast(V, IntNTy);
    Value *PermX = B.CreateIntrinsic(
        Intrinsic::amdgcn_permlanex16, {},
        {V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});

    Value *UpdateDPPCall =
        B.CreateCall(UpdateDPP, {Identity, B.CreateBitCast(PermX, AtomicTy),
                                 B.getInt32(DPP::QUAD_PERM_ID), B.getInt32(0xa),
                                 B.getInt32(0xf), B.getFalse()});
    V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy), UpdateDPPCall);

    if (!ST->isWave32()) {
      // Combine lane 31 into lanes 32..63.
      V = B.CreateBitCast(V, IntNTy);
      Value *const Lane31 = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
                                              {V, B.getInt32(31)});

      Value *UpdateDPPCall = B.CreateCall(
          UpdateDPP, {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
                      B.getInt32(0xc), B.getInt32(0xf), B.getFalse()});

      V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
                              UpdateDPPCall);
    }
  }
  return V;
}

// Use the builder to create a shift right of V across the wavefront, with all
// lanes active, to turn an inclusive scan into an exclusive scan.
Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
                                                  Value *Identity) const {
  Type *AtomicTy = V->getType();
  Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());

  Module *M = B.GetInsertBlock()->getModule();
  Function *UpdateDPP =
      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
  if (ST->hasDPPWavefrontShifts()) {
    // GFX9 has DPP wavefront shift operations.
    V = B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::WAVE_SHR1), B.getInt32(0xf),
                      B.getInt32(0xf), B.getFalse()});
  } else {
    Function *ReadLane =
        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});
    Function *WriteLane =
        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_writelane, {});

    // On GFX10 all DPP operations are confined to a single row. To get cross-
    // row operations we have to use permlane or readlane.
    Value *Old = V;
    V = B.CreateCall(UpdateDPP,
                     {Identity, V, B.getInt32(DPP::ROW_SHR0 + 1),
                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});

    // Copy the old lane 15 to the new lane 16.
    V = B.CreateCall(
        WriteLane,
        {B.CreateCall(ReadLane, {B.CreateBitCast(Old, IntNTy), B.getInt32(15)}),
         B.getInt32(16), B.CreateBitCast(V, IntNTy)});
    V = B.CreateBitCast(V, AtomicTy);
    if (!ST->isWave32()) {
      // Copy the old lane 31 to the new lane 32.
      V = B.CreateBitCast(V, IntNTy);
      V = B.CreateCall(WriteLane,
                       {B.CreateCall(ReadLane, {B.CreateBitCast(Old, IntNTy),
                                                B.getInt32(31)}),
                        B.getInt32(32), V});

      // Copy the old lane 47 to the new lane 48.
      V = B.CreateCall(
          WriteLane,
          {B.CreateCall(ReadLane,
                        {B.CreateBitCast(Old, IntNTy), B.getInt32(47)}),
           B.getInt32(48), V});
      V = B.CreateBitCast(V, AtomicTy);
    }
  }

  return V;
}

// Use the builder to create an exclusive scan and compute the final reduced
// value using an iterative approach. This provides an alternative
// implementation to DPP, which uses WWM for its scan computations. This
// routine iterates over the active lanes, reading, combining and updating the
// value using the readlane and writelane intrinsics.
std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
    IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *const Identity, Value *V,
    Instruction &I, BasicBlock *ComputeLoop, BasicBlock *ComputeEnd) const {
  auto *Ty = I.getType();
  auto *WaveTy = B.getIntNTy(ST->getWavefrontSize());
  auto *EntryBB = I.getParent();
  auto NeedResult = !I.use_empty();

  auto *Ballot =
      B.CreateIntrinsic(Intrinsic::amdgcn_ballot, WaveTy, B.getTrue());

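  // The ballot seeds ActiveBits: each iteration of ComputeLoop finds the
  // lowest remaining active lane via cttz, folds that lane's value into the
  // running accumulator, clears the lane's bit, and exits once no active
  // lanes remain.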
  // Start inserting instructions for the ComputeLoop block.
  B.SetInsertPoint(ComputeLoop);
  // Phi nodes for the accumulator, the scan results destination, and the
  // active lanes.
  auto *Accumulator = B.CreatePHI(Ty, 2, "Accumulator");
  Accumulator->addIncoming(Identity, EntryBB);
  PHINode *OldValuePhi = nullptr;
  if (NeedResult) {
    OldValuePhi = B.CreatePHI(Ty, 2, "OldValuePhi");
    OldValuePhi->addIncoming(PoisonValue::get(Ty), EntryBB);
  }
  auto *ActiveBits = B.CreatePHI(WaveTy, 2, "ActiveBits");
  ActiveBits->addIncoming(Ballot, EntryBB);

  // Use the llvm.cttz intrinsic to find the lowest remaining active lane.
  auto *FF1 =
      B.CreateIntrinsic(Intrinsic::cttz, WaveTy, {ActiveBits, B.getTrue()});

  Type *IntNTy = B.getIntNTy(Ty->getPrimitiveSizeInBits());
  auto *LaneIdxInt = B.CreateTrunc(FF1, IntNTy);

  // Get the value required for the atomic operation.
  V = B.CreateBitCast(V, IntNTy);
  Value *LaneValue =
      B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {}, {V, LaneIdxInt});
  LaneValue = B.CreateBitCast(LaneValue, Ty);

  // Perform writelane if intermediate scan results are required later in the
  // kernel computations.
  Value *OldValue = nullptr;
  if (NeedResult) {
    OldValue =
        B.CreateIntrinsic(Intrinsic::amdgcn_writelane, {},
                          {B.CreateBitCast(Accumulator, IntNTy), LaneIdxInt,
                           B.CreateBitCast(OldValuePhi, IntNTy)});
    OldValue = B.CreateBitCast(OldValue, Ty);
    OldValuePhi->addIncoming(OldValue, ComputeLoop);
  }

  // Accumulate the results.
  auto *NewAccumulator = buildNonAtomicBinOp(B, Op, Accumulator, LaneValue);
  Accumulator->addIncoming(NewAccumulator, ComputeLoop);

  // Set the bit of the current active lane to zero so that llvm.cttz returns
  // the next active lane on the following iteration.
  auto *Mask = B.CreateShl(ConstantInt::get(WaveTy, 1), FF1);

  auto *InverseMask = B.CreateXor(Mask, ConstantInt::get(WaveTy, -1));
  auto *NewActiveBits = B.CreateAnd(ActiveBits, InverseMask);
  ActiveBits->addIncoming(NewActiveBits, ComputeLoop);

  // Branch out of the loop when all lanes are processed.
  auto *IsEnd = B.CreateICmpEQ(NewActiveBits, ConstantInt::get(WaveTy, 0));
  B.CreateCondBr(IsEnd, ComputeEnd, ComputeLoop);

  B.SetInsertPoint(ComputeEnd);

  return {OldValue, NewAccumulator};
}

static Constant *getIdentityValueForAtomicOp(Type *const Ty,
                                             AtomicRMWInst::BinOp Op) {
  LLVMContext &C = Ty->getContext();
  const unsigned BitWidth = Ty->getPrimitiveSizeInBits();
  switch (Op) {
  default:
    llvm_unreachable("Unhandled atomic op");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::UMax:
    return ConstantInt::get(C, APInt::getMinValue(BitWidth));
  case AtomicRMWInst::And:
  case AtomicRMWInst::UMin:
    return ConstantInt::get(C, APInt::getMaxValue(BitWidth));
  case AtomicRMWInst::Max:
    return ConstantInt::get(C, APInt::getSignedMinValue(BitWidth));
  case AtomicRMWInst::Min:
    return ConstantInt::get(C, APInt::getSignedMaxValue(BitWidth));
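  // Note that the FAdd identity below is -0.0 rather than +0.0: since
  // (+0.0) + (-0.0) == +0.0, a +0.0 identity would not preserve an input of
  // -0.0, whereas (-0.0) + x == x for every x.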
  case AtomicRMWInst::FAdd:
    return ConstantFP::get(C, APFloat::getZero(Ty->getFltSemantics(), true));
  case AtomicRMWInst::FSub:
    return ConstantFP::get(C, APFloat::getZero(Ty->getFltSemantics(), false));
  case AtomicRMWInst::FMin:
    return ConstantFP::get(C, APFloat::getInf(Ty->getFltSemantics(), false));
  case AtomicRMWInst::FMax:
    return ConstantFP::get(C, APFloat::getInf(Ty->getFltSemantics(), true));
  }
}

static Value *buildMul(IRBuilder<> &B, Value *LHS, Value *RHS) {
  const ConstantInt *CI = dyn_cast<ConstantInt>(LHS);
  return (CI && CI->isOne()) ? RHS : B.CreateMul(LHS, RHS);
}

void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
                                               AtomicRMWInst::BinOp Op,
                                               unsigned ValIdx,
                                               bool ValDivergent) const {
  // Start building just before the instruction.
  IRBuilder<> B(&I);

  if (AtomicRMWInst::isFPOperation(Op)) {
    B.setIsFPConstrained(I.getFunction()->hasFnAttribute(Attribute::StrictFP));
  }

  // If we are in a pixel shader, because of how we have to mask out helper
  // lane invocations, we need to record the entry and exit BBs.
  BasicBlock *PixelEntryBB = nullptr;
  BasicBlock *PixelExitBB = nullptr;

  // If we're optimizing an atomic within a pixel shader, we need to wrap the
  // entire atomic operation in a helper-lane check. We do not want any helper
  // lanes that are around only for the purposes of derivatives to take part
  // in any cross-lane communication, and we use a branch on whether the lane is
  // live to do this.
  if (IsPixelShader) {
    // Record I's original position as the entry block.
    PixelEntryBB = I.getParent();

    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
    Instruction *const NonHelperTerminator =
        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, &DTU, nullptr);

    // Record I's new position as the exit block.
    PixelExitBB = I.getParent();

    I.moveBefore(NonHelperTerminator);
    B.SetInsertPoint(&I);
  }

  Type *const Ty = I.getType();
  Type *Int32Ty = B.getInt32Ty();
  Type *IntNTy = B.getIntNTy(Ty->getPrimitiveSizeInBits());
  bool isAtomicFloatingPointTy = Ty->isFloatingPointTy();
  const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
  auto *const VecTy = FixedVectorType::get(Int32Ty, 2);

  // This is the value in the atomic operation we need to combine in order to
  // reduce the number of atomic operations.
  Value *V = I.getOperand(ValIdx);

  // We need to know how many lanes are active within the wavefront, and we do
  // this by doing a ballot of active lanes.
  Type *const WaveTy = B.getIntNTy(ST->getWavefrontSize());
  CallInst *const Ballot =
      B.CreateIntrinsic(Intrinsic::amdgcn_ballot, WaveTy, B.getTrue());

  // We need to know how many lanes are active within the wavefront that are
  // below us. If we counted each lane linearly starting from 0, a lane is
  // below us only if its associated index was less than ours. We do this by
  // using the mbcnt intrinsic.
  Value *Mbcnt;
  if (ST->isWave32()) {
    Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {},
                              {Ballot, B.getInt32(0)});
  } else {
    Value *const ExtractLo = B.CreateTrunc(Ballot, Int32Ty);
    Value *const ExtractHi = B.CreateTrunc(B.CreateLShr(Ballot, 32), Int32Ty);
    Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {},
                              {ExtractLo, B.getInt32(0)});
    Mbcnt =
        B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {}, {ExtractHi, Mbcnt});
  }
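  // (In wave64 mode the 64-bit ballot is consumed as two 32-bit halves:
  // mbcnt_lo counts the active lanes below us in the low half, and mbcnt_hi
  // adds those in the high half, giving this lane's rank among the active
  // lanes.)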

  Function *F = I.getFunction();
  LLVMContext &C = F->getContext();

  // For atomic sub, perform scan with add operation and allow one lane to
  // subtract the reduced value later.
  AtomicRMWInst::BinOp ScanOp = Op;
  if (Op == AtomicRMWInst::Sub) {
    ScanOp = AtomicRMWInst::Add;
  } else if (Op == AtomicRMWInst::FSub) {
    ScanOp = AtomicRMWInst::FAdd;
  }
  Value *Identity = getIdentityValueForAtomicOp(Ty, ScanOp);

  Value *ExclScan = nullptr;
  Value *NewV = nullptr;

  const bool NeedResult = !I.use_empty();

  BasicBlock *ComputeLoop = nullptr;
  BasicBlock *ComputeEnd = nullptr;
  // If we have a divergent value in each lane, we need to combine the value
  // using DPP.
  if (ValDivergent) {
    if (ScanImpl == ScanOptions::DPP) {
      // First we need to set all inactive invocations to the identity value, so
      // that they can correctly contribute to the final result.
      V = B.CreateBitCast(V, IntNTy);
      Identity = B.CreateBitCast(Identity, IntNTy);
      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, IntNTy,
                               {V, Identity});
      NewV = B.CreateBitCast(NewV, Ty);
      V = B.CreateBitCast(V, Ty);
      Identity = B.CreateBitCast(Identity, Ty);
      if (!NeedResult && ST->hasPermLaneX16()) {
        // On GFX10 the permlanex16 instruction helps us build a reduction
        // without too many readlanes and writelanes, which are generally bad
        // for performance.
        NewV = buildReduction(B, ScanOp, NewV, Identity);
      } else {
        NewV = buildScan(B, ScanOp, NewV, Identity);
        if (NeedResult)
          ExclScan = buildShiftRight(B, NewV, Identity);
        // Read the value from the last lane, which has accumulated the values
        // of each active lane in the wavefront. This will be our new value
        // which we will provide to the atomic operation.
        Value *const LastLaneIdx = B.getInt32(ST->getWavefrontSize() - 1);
        assert(TyBitWidth == 32);
        NewV = B.CreateBitCast(NewV, IntNTy);
        NewV = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
                                 {NewV, LastLaneIdx});
        NewV = B.CreateBitCast(NewV, Ty);
      }
      // Finally mark the readlanes in the WWM section.
      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_strict_wwm, Ty, NewV);
    } else if (ScanImpl == ScanOptions::Iterative) {
      // Alternative implementation for scan.
      ComputeLoop = BasicBlock::Create(C, "ComputeLoop", F);
      ComputeEnd = BasicBlock::Create(C, "ComputeEnd", F);
      std::tie(ExclScan, NewV) = buildScanIteratively(B, ScanOp, Identity, V, I,
                                                      ComputeLoop, ComputeEnd);
    } else {
      llvm_unreachable("Atomic Optimizer is disabled for None strategy");
    }
  } else {
    switch (Op) {
    default:
      llvm_unreachable("Unhandled atomic op");

    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub: {
      // The new value we will be contributing to the atomic operation is the
      // old value times the number of active lanes.
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = buildMul(B, V, Ctpop);
      break;
    }
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub: {
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Int32Ty, false);
      Value *const CtpopFP = B.CreateUIToFP(Ctpop, Ty);
      NewV = B.CreateFMul(V, CtpopFP);
      break;
    }
    case AtomicRMWInst::And:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Max:
    case AtomicRMWInst::Min:
    case AtomicRMWInst::UMax:
    case AtomicRMWInst::UMin:
    case AtomicRMWInst::FMin:
    case AtomicRMWInst::FMax:
      // These operations with a uniform value are idempotent: doing the atomic
      // operation multiple times has the same effect as doing it once.
      NewV = V;
      break;

    case AtomicRMWInst::Xor:
      // The new value we will be contributing to the atomic operation is the
      // old value times the parity of the number of active lanes.
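      // (Xor-ing the same value an even number of times cancels out, since
      // V ^ V == 0, so n active lanes contribute V when n is odd and nothing
      // when n is even.)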
      Value *const Ctpop = B.CreateIntCast(
          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
      NewV = buildMul(B, V, B.CreateAnd(Ctpop, 1));
      break;
    }
  }

  // We only want a single lane to enter our new control flow, and we do this
  // by checking if there are any active lanes below us. Only one lane will
  // have 0 active lanes below us, so that will be the only one to progress.
  Value *const Cond = B.CreateICmpEQ(Mbcnt, B.getInt32(0));

  // Store I's original basic block before we split the block.
  BasicBlock *const OriginalBB = I.getParent();

  // We need to introduce some new control flow to force a single lane to be
  // active. We do this by splitting I's basic block at I, and introducing the
  // new block such that:
  // entry --> single_lane -\
  //       \------------------> exit
  Instruction *const SingleLaneTerminator =
      SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, &DTU, nullptr);

  // At this point, we have split I's block to allow one lane in the wavefront
  // to update the precomputed reduced value. We have also completed the
  // codegen for the new control flow, i.e. the iterative loop that performs
  // the reduction and scan using the ComputeLoop and ComputeEnd blocks.
  // For the new control flow, we need to move the branch instruction, i.e. the
  // terminator created during SplitBlockAndInsertIfThen, from I's block to the
  // ComputeEnd block. We also need to set up the predecessor of the next block
  // for when the single lane is done updating the final reduced value.
  BasicBlock *Predecessor = nullptr;
  if (ValDivergent && ScanImpl == ScanOptions::Iterative) {
    // Move the terminator from I's block to the ComputeEnd block.
    //
    // OriginalBB is known to have a branch as terminator because
    // SplitBlockAndInsertIfThen will have inserted one.
    BranchInst *Terminator = cast<BranchInst>(OriginalBB->getTerminator());
    B.SetInsertPoint(ComputeEnd);
    Terminator->removeFromParent();
    B.Insert(Terminator);

    // Branch to the ComputeLoop block unconditionally from I's block for the
    // iterative approach.
    B.SetInsertPoint(OriginalBB);
    B.CreateBr(ComputeLoop);

    // Update the dominator tree for the new control flow.
    SmallVector<DominatorTree::UpdateType, 6> DomTreeUpdates(
        {{DominatorTree::Insert, OriginalBB, ComputeLoop},
         {DominatorTree::Insert, ComputeLoop, ComputeEnd}});

    // We're moving the terminator from EntryBB to ComputeEnd; make sure we
    // move the DT edges as well.
    for (auto *Succ : Terminator->successors()) {
      DomTreeUpdates.push_back({DominatorTree::Insert, ComputeEnd, Succ});
      DomTreeUpdates.push_back({DominatorTree::Delete, OriginalBB, Succ});
    }

    DTU.applyUpdates(DomTreeUpdates);

    Predecessor = ComputeEnd;
  } else {
    Predecessor = OriginalBB;
  }
  // Move the IR builder into single_lane next.
  B.SetInsertPoint(SingleLaneTerminator);

  // Clone the original atomic operation into the single lane, replacing the
  // original value with our newly created one.
  Instruction *const NewI = I.clone();
  B.Insert(NewI);
  NewI->setOperand(ValIdx, NewV);

  // Move the IR builder into exit next, and start inserting just before the
  // original instruction.
  B.SetInsertPoint(&I);

  if (NeedResult) {
    // Create a PHI node to get our new atomic result into the exit block.
    PHINode *const PHI = B.CreatePHI(Ty, 2);
    PHI->addIncoming(PoisonValue::get(Ty), Predecessor);
    PHI->addIncoming(NewI, SingleLaneTerminator->getParent());

    // We need to broadcast the value from the lowest active lane (the first
    // lane) to all other lanes in the wavefront. We use an intrinsic for this,
    // but have to handle 64-bit broadcasts with two calls to this intrinsic.
    Value *BroadcastI = nullptr;

    if (TyBitWidth == 64) {
      Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
      Value *const ExtractLo = B.CreateTrunc(CastedPhi, Int32Ty);
      Value *const ExtractHi =
          B.CreateTrunc(B.CreateLShr(CastedPhi, 32), Int32Ty);
      CallInst *const ReadFirstLaneLo =
          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
      CallInst *const ReadFirstLaneHi =
          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractHi);
      Value *const PartialInsert = B.CreateInsertElement(
          PoisonValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
      Value *const Insert =
          B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
      BroadcastI = B.CreateBitCast(Insert, Ty);
    } else if (TyBitWidth == 32) {
      Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
      BroadcastI =
          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, CastedPhi);
      BroadcastI = B.CreateBitCast(BroadcastI, Ty);

    } else {
      llvm_unreachable("Unhandled atomic bit width");
    }

    // Now that we have the result of our single atomic operation, we need to
    // get our individual lane's slice into the result. We use the lane offset
    // we previously calculated combined with the atomic result value we got
    // from the first lane, to get our lane's index into the atomic result.
    Value *LaneOffset = nullptr;
    if (ValDivergent) {
      if (ScanImpl == ScanOptions::DPP) {
        LaneOffset =
            B.CreateIntrinsic(Intrinsic::amdgcn_strict_wwm, Ty, ExclScan);
      } else if (ScanImpl == ScanOptions::Iterative) {
        LaneOffset = ExclScan;
      } else {
        llvm_unreachable("Atomic Optimizer is disabled for None strategy");
      }
    } else {
      Mbcnt = isAtomicFloatingPointTy ? B.CreateUIToFP(Mbcnt, Ty)
                                      : B.CreateIntCast(Mbcnt, Ty, false);
      switch (Op) {
      default:
        llvm_unreachable("Unhandled atomic op");
      case AtomicRMWInst::Add:
      case AtomicRMWInst::Sub:
        LaneOffset = buildMul(B, V, Mbcnt);
        break;
      case AtomicRMWInst::And:
      case AtomicRMWInst::Or:
      case AtomicRMWInst::Max:
      case AtomicRMWInst::Min:
      case AtomicRMWInst::UMax:
      case AtomicRMWInst::UMin:
      case AtomicRMWInst::FMin:
      case AtomicRMWInst::FMax:
        LaneOffset = B.CreateSelect(Cond, Identity, V);
        break;
      case AtomicRMWInst::Xor:
        LaneOffset = buildMul(B, V, B.CreateAnd(Mbcnt, 1));
        break;
      case AtomicRMWInst::FAdd:
      case AtomicRMWInst::FSub: {
        LaneOffset = B.CreateFMul(V, Mbcnt);
        break;
      }
      }
    }
    Value *const Result = buildNonAtomicBinOp(B, Op, BroadcastI, LaneOffset);

    if (IsPixelShader) {
      // Need a final PHI to reconverge to above the helper lane branch mask.
      B.SetInsertPoint(PixelExitBB, PixelExitBB->getFirstNonPHIIt());

      PHINode *const PHI = B.CreatePHI(Ty, 2);
      PHI->addIncoming(PoisonValue::get(Ty), PixelEntryBB);
      PHI->addIncoming(Result, I.getParent());
      I.replaceAllUsesWith(PHI);
    } else {
      // Replace the original atomic instruction with the new one.
      I.replaceAllUsesWith(Result);
    }
  }

  // And delete the original.
  I.eraseFromParent();
}

INITIALIZE_PASS_BEGIN(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                      "AMDGPU atomic optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                    "AMDGPU atomic optimizations", false, false)

FunctionPass *llvm::createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy) {
  return new AMDGPUAtomicOptimizer(ScanStrategy);
}