1 | //===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// R600 Implementation of TargetInstrInfo. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "R600InstrInfo.h" |
15 | #include "AMDGPU.h" |
16 | #include "MCTargetDesc/R600MCTargetDesc.h" |
17 | #include "R600.h" |
18 | #include "R600Defines.h" |
19 | #include "R600Subtarget.h" |
20 | #include "llvm/ADT/SmallSet.h" |
21 | #include "llvm/CodeGen/MachineFrameInfo.h" |
22 | |
23 | using namespace llvm; |
24 | |
25 | #define GET_INSTRINFO_CTOR_DTOR |
26 | #include "R600GenDFAPacketizer.inc" |
27 | |
28 | #define GET_INSTRINFO_CTOR_DTOR |
29 | #define GET_INSTRMAP_INFO |
30 | #define GET_INSTRINFO_NAMED_OPS |
31 | #include "R600GenInstrInfo.inc" |
32 | |
33 | R600InstrInfo::R600InstrInfo(const R600Subtarget &ST) |
34 | : R600GenInstrInfo(-1, -1), RI(), ST(ST) {} |
35 | |
36 | bool R600InstrInfo::isVector(const MachineInstr &MI) const { |
37 | return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR; |
38 | } |
39 | |
40 | void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB, |
41 | MachineBasicBlock::iterator MI, |
42 | const DebugLoc &DL, MCRegister DestReg, |
43 | MCRegister SrcReg, bool KillSrc) const { |
44 | unsigned VectorComponents = 0; |
45 | if ((R600::R600_Reg128RegClass.contains(DestReg) || |
46 | R600::R600_Reg128VerticalRegClass.contains(DestReg)) && |
47 | (R600::R600_Reg128RegClass.contains(SrcReg) || |
48 | R600::R600_Reg128VerticalRegClass.contains(SrcReg))) { |
49 | VectorComponents = 4; |
50 | } else if((R600::R600_Reg64RegClass.contains(DestReg) || |
51 | R600::R600_Reg64VerticalRegClass.contains(DestReg)) && |
52 | (R600::R600_Reg64RegClass.contains(SrcReg) || |
53 | R600::R600_Reg64VerticalRegClass.contains(SrcReg))) { |
54 | VectorComponents = 2; |
55 | } |
56 | |
57 | if (VectorComponents > 0) { |
58 | for (unsigned I = 0; I < VectorComponents; I++) { |
59 | unsigned SubRegIndex = R600RegisterInfo::getSubRegFromChannel(Channel: I); |
60 | buildDefaultInstruction(MBB, I: MI, R600::Opcode: MOV, |
61 | DstReg: RI.getSubReg(DestReg, SubRegIndex), |
62 | Src0Reg: RI.getSubReg(SrcReg, SubRegIndex)) |
63 | .addReg(DestReg, |
64 | RegState::Define | RegState::Implicit); |
65 | } |
66 | } else { |
67 | MachineInstr *NewMI = buildDefaultInstruction(MBB, I: MI, R600::Opcode: MOV, |
68 | DstReg: DestReg, Src0Reg: SrcReg); |
69 | NewMI->getOperand(getOperandIdx(*NewMI, R600::OpName::src0)) |
70 | .setIsKill(KillSrc); |
71 | } |
72 | } |
73 | |
74 | /// \returns true if \p MBBI can be moved into a new basic. |
75 | bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB, |
76 | MachineBasicBlock::iterator MBBI) const { |
77 | for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(), |
78 | E = MBBI->operands_end(); I != E; ++I) { |
79 | if (I->isReg() && !I->getReg().isVirtual() && I->isUse() && |
80 | RI.isPhysRegLiveAcrossClauses(I->getReg())) |
81 | return false; |
82 | } |
83 | return true; |
84 | } |
85 | |
86 | bool R600InstrInfo::isMov(unsigned Opcode) const { |
87 | switch(Opcode) { |
88 | default: |
89 | return false; |
90 | case R600::MOV: |
91 | case R600::MOV_IMM_F32: |
92 | case R600::MOV_IMM_I32: |
93 | return true; |
94 | } |
95 | } |
96 | |
97 | bool R600InstrInfo::isReductionOp(unsigned Opcode) const { |
98 | return false; |
99 | } |
100 | |
101 | bool R600InstrInfo::isCubeOp(unsigned Opcode) const { |
102 | switch(Opcode) { |
103 | default: return false; |
104 | case R600::CUBE_r600_pseudo: |
105 | case R600::CUBE_r600_real: |
106 | case R600::CUBE_eg_pseudo: |
107 | case R600::CUBE_eg_real: |
108 | return true; |
109 | } |
110 | } |
111 | |
112 | bool R600InstrInfo::isALUInstr(unsigned Opcode) const { |
113 | unsigned TargetFlags = get(Opcode).TSFlags; |
114 | |
115 | return (TargetFlags & R600_InstFlag::ALU_INST); |
116 | } |
117 | |
118 | bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const { |
119 | unsigned TargetFlags = get(Opcode).TSFlags; |
120 | |
121 | return ((TargetFlags & R600_InstFlag::OP1) | |
122 | (TargetFlags & R600_InstFlag::OP2) | |
123 | (TargetFlags & R600_InstFlag::OP3)); |
124 | } |
125 | |
126 | bool R600InstrInfo::isLDSInstr(unsigned Opcode) const { |
127 | unsigned TargetFlags = get(Opcode).TSFlags; |
128 | |
129 | return ((TargetFlags & R600_InstFlag::LDS_1A) | |
130 | (TargetFlags & R600_InstFlag::LDS_1A1D) | |
131 | (TargetFlags & R600_InstFlag::LDS_1A2D)); |
132 | } |
133 | |
134 | bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const { |
135 | return isLDSInstr(Opcode) && getOperandIdx(Opcode, R600::OpName::dst) != -1; |
136 | } |
137 | |
138 | bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const { |
139 | if (isALUInstr(Opcode: MI.getOpcode())) |
140 | return true; |
141 | if (isVector(MI) || isCubeOp(Opcode: MI.getOpcode())) |
142 | return true; |
143 | switch (MI.getOpcode()) { |
144 | case R600::PRED_X: |
145 | case R600::INTERP_PAIR_XY: |
146 | case R600::INTERP_PAIR_ZW: |
147 | case R600::INTERP_VEC_LOAD: |
148 | case R600::COPY: |
149 | case R600::DOT_4: |
150 | return true; |
151 | default: |
152 | return false; |
153 | } |
154 | } |
155 | |
156 | bool R600InstrInfo::isTransOnly(unsigned Opcode) const { |
157 | if (ST.hasCaymanISA()) |
158 | return false; |
159 | return (get(Opcode).getSchedClass() == R600::Sched::TransALU); |
160 | } |
161 | |
162 | bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const { |
163 | return isTransOnly(Opcode: MI.getOpcode()); |
164 | } |
165 | |
166 | bool R600InstrInfo::isVectorOnly(unsigned Opcode) const { |
167 | return (get(Opcode).getSchedClass() == R600::Sched::VecALU); |
168 | } |
169 | |
170 | bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const { |
171 | return isVectorOnly(Opcode: MI.getOpcode()); |
172 | } |
173 | |
174 | bool R600InstrInfo::isExport(unsigned Opcode) const { |
175 | return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT); |
176 | } |
177 | |
178 | bool R600InstrInfo::usesVertexCache(unsigned Opcode) const { |
179 | return ST.hasVertexCache() && IS_VTX(get(Opcode)); |
180 | } |
181 | |
182 | bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const { |
183 | const MachineFunction *MF = MI.getParent()->getParent(); |
184 | return !AMDGPU::isCompute(CC: MF->getFunction().getCallingConv()) && |
185 | usesVertexCache(Opcode: MI.getOpcode()); |
186 | } |
187 | |
188 | bool R600InstrInfo::usesTextureCache(unsigned Opcode) const { |
189 | return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode)); |
190 | } |
191 | |
192 | bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const { |
193 | const MachineFunction *MF = MI.getParent()->getParent(); |
194 | return (AMDGPU::isCompute(CC: MF->getFunction().getCallingConv()) && |
195 | usesVertexCache(Opcode: MI.getOpcode())) || |
196 | usesTextureCache(Opcode: MI.getOpcode()); |
197 | } |
198 | |
199 | bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const { |
200 | switch (Opcode) { |
201 | case R600::KILLGT: |
202 | case R600::GROUP_BARRIER: |
203 | return true; |
204 | default: |
205 | return false; |
206 | } |
207 | } |
208 | |
209 | bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const { |
210 | return MI.findRegisterUseOperandIdx(R600::Reg: AR_X, TRI: &RI, isKill: false) != -1; |
211 | } |
212 | |
213 | bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const { |
214 | return MI.findRegisterDefOperandIdx(R600::Reg: AR_X, TRI: &RI, isDead: false, Overlap: false) != -1; |
215 | } |
216 | |
217 | bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const { |
218 | if (!isALUInstr(Opcode: MI.getOpcode())) { |
219 | return false; |
220 | } |
221 | for (MachineInstr::const_mop_iterator I = MI.operands_begin(), |
222 | E = MI.operands_end(); |
223 | I != E; ++I) { |
224 | if (!I->isReg() || !I->isUse() || I->getReg().isVirtual()) |
225 | continue; |
226 | |
227 | if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg())) |
228 | return true; |
229 | } |
230 | return false; |
231 | } |
232 | |
233 | int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const { |
234 | static const unsigned SrcSelTable[][2] = { |
235 | {R600::OpName::src0, R600::OpName::src0_sel}, |
236 | {R600::OpName::src1, R600::OpName::src1_sel}, |
237 | {R600::OpName::src2, R600::OpName::src2_sel}, |
238 | {R600::OpName::src0_X, R600::OpName::src0_sel_X}, |
239 | {R600::OpName::src0_Y, R600::OpName::src0_sel_Y}, |
240 | {R600::OpName::src0_Z, R600::OpName::src0_sel_Z}, |
241 | {R600::OpName::src0_W, R600::OpName::src0_sel_W}, |
242 | {R600::OpName::src1_X, R600::OpName::src1_sel_X}, |
243 | {R600::OpName::src1_Y, R600::OpName::src1_sel_Y}, |
244 | {R600::OpName::src1_Z, R600::OpName::src1_sel_Z}, |
245 | {R600::OpName::src1_W, R600::OpName::src1_sel_W} |
246 | }; |
247 | |
248 | for (const auto &Row : SrcSelTable) { |
249 | if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) { |
250 | return getOperandIdx(Opcode, Row[1]); |
251 | } |
252 | } |
253 | return -1; |
254 | } |
255 | |
256 | SmallVector<std::pair<MachineOperand *, int64_t>, 3> |
257 | R600InstrInfo::getSrcs(MachineInstr &MI) const { |
258 | SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result; |
259 | |
260 | if (MI.getOpcode() == R600::DOT_4) { |
261 | static const unsigned OpTable[8][2] = { |
262 | {R600::OpName::src0_X, R600::OpName::src0_sel_X}, |
263 | {R600::OpName::src0_Y, R600::OpName::src0_sel_Y}, |
264 | {R600::OpName::src0_Z, R600::OpName::src0_sel_Z}, |
265 | {R600::OpName::src0_W, R600::OpName::src0_sel_W}, |
266 | {R600::OpName::src1_X, R600::OpName::src1_sel_X}, |
267 | {R600::OpName::src1_Y, R600::OpName::src1_sel_Y}, |
268 | {R600::OpName::src1_Z, R600::OpName::src1_sel_Z}, |
269 | {R600::OpName::src1_W, R600::OpName::src1_sel_W}, |
270 | }; |
271 | |
272 | for (const auto &Op : OpTable) { |
273 | MachineOperand &MO = MI.getOperand(i: getOperandIdx(Opcode: MI.getOpcode(), Op: Op[0])); |
274 | Register Reg = MO.getReg(); |
275 | if (Reg == R600::ALU_CONST) { |
276 | MachineOperand &Sel = |
277 | MI.getOperand(i: getOperandIdx(Opcode: MI.getOpcode(), Op: Op[1])); |
278 | Result.push_back(Elt: std::pair(&MO, Sel.getImm())); |
279 | continue; |
280 | } |
281 | } |
282 | return Result; |
283 | } |
284 | |
285 | static const unsigned OpTable[3][2] = { |
286 | {R600::OpName::src0, R600::OpName::src0_sel}, |
287 | {R600::OpName::src1, R600::OpName::src1_sel}, |
288 | {R600::OpName::src2, R600::OpName::src2_sel}, |
289 | }; |
290 | |
291 | for (const auto &Op : OpTable) { |
292 | int SrcIdx = getOperandIdx(Opcode: MI.getOpcode(), Op: Op[0]); |
293 | if (SrcIdx < 0) |
294 | break; |
295 | MachineOperand &MO = MI.getOperand(i: SrcIdx); |
296 | Register Reg = MO.getReg(); |
297 | if (Reg == R600::ALU_CONST) { |
298 | MachineOperand &Sel = MI.getOperand(i: getOperandIdx(Opcode: MI.getOpcode(), Op: Op[1])); |
299 | Result.push_back(Elt: std::pair(&MO, Sel.getImm())); |
300 | continue; |
301 | } |
302 | if (Reg == R600::ALU_LITERAL_X) { |
303 | MachineOperand &Operand = |
304 | MI.getOperand(getOperandIdx(MI.getOpcode(), R600::OpName::literal)); |
305 | if (Operand.isImm()) { |
306 | Result.push_back(Elt: std::pair(&MO, Operand.getImm())); |
307 | continue; |
308 | } |
309 | assert(Operand.isGlobal()); |
310 | } |
311 | Result.push_back(Elt: std::pair(&MO, 0)); |
312 | } |
313 | return Result; |
314 | } |
315 | |
316 | std::vector<std::pair<int, unsigned>> |
317 | R600InstrInfo::(MachineInstr &MI, |
318 | const DenseMap<unsigned, unsigned> &PV, |
319 | unsigned &ConstCount) const { |
320 | ConstCount = 0; |
321 | const std::pair<int, unsigned> DummyPair(-1, 0); |
322 | std::vector<std::pair<int, unsigned>> Result; |
323 | unsigned i = 0; |
324 | for (const auto &Src : getSrcs(MI)) { |
325 | ++i; |
326 | Register Reg = Src.first->getReg(); |
327 | int Index = RI.getEncodingValue(Reg) & 0xff; |
328 | if (Reg == R600::OQAP) { |
329 | Result.push_back(x: std::pair(Index, 0U)); |
330 | } |
331 | if (PV.contains(Val: Reg)) { |
332 | // 255 is used to tells its a PS/PV reg |
333 | Result.push_back(x: std::pair(255, 0U)); |
334 | continue; |
335 | } |
336 | if (Index > 127) { |
337 | ConstCount++; |
338 | Result.push_back(x: DummyPair); |
339 | continue; |
340 | } |
341 | unsigned Chan = RI.getHWRegChan(reg: Reg); |
342 | Result.push_back(x: std::pair(Index, Chan)); |
343 | } |
344 | for (; i < 3; ++i) |
345 | Result.push_back(x: DummyPair); |
346 | return Result; |
347 | } |
348 | |
349 | static std::vector<std::pair<int, unsigned>> |
350 | Swizzle(std::vector<std::pair<int, unsigned>> Src, |
351 | R600InstrInfo::BankSwizzle Swz) { |
352 | if (Src[0] == Src[1]) |
353 | Src[1].first = -1; |
354 | switch (Swz) { |
355 | case R600InstrInfo::ALU_VEC_012_SCL_210: |
356 | break; |
357 | case R600InstrInfo::ALU_VEC_021_SCL_122: |
358 | std::swap(x&: Src[1], y&: Src[2]); |
359 | break; |
360 | case R600InstrInfo::ALU_VEC_102_SCL_221: |
361 | std::swap(x&: Src[0], y&: Src[1]); |
362 | break; |
363 | case R600InstrInfo::ALU_VEC_120_SCL_212: |
364 | std::swap(x&: Src[0], y&: Src[1]); |
365 | std::swap(x&: Src[0], y&: Src[2]); |
366 | break; |
367 | case R600InstrInfo::ALU_VEC_201: |
368 | std::swap(x&: Src[0], y&: Src[2]); |
369 | std::swap(x&: Src[0], y&: Src[1]); |
370 | break; |
371 | case R600InstrInfo::ALU_VEC_210: |
372 | std::swap(x&: Src[0], y&: Src[2]); |
373 | break; |
374 | } |
375 | return Src; |
376 | } |
377 | |
378 | static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) { |
379 | assert(Op < 3 && "Out of range swizzle index" ); |
380 | switch (Swz) { |
381 | case R600InstrInfo::ALU_VEC_012_SCL_210: { |
382 | unsigned Cycles[3] = { 2, 1, 0}; |
383 | return Cycles[Op]; |
384 | } |
385 | case R600InstrInfo::ALU_VEC_021_SCL_122: { |
386 | unsigned Cycles[3] = { 1, 2, 2}; |
387 | return Cycles[Op]; |
388 | } |
389 | case R600InstrInfo::ALU_VEC_120_SCL_212: { |
390 | unsigned Cycles[3] = { 2, 1, 2}; |
391 | return Cycles[Op]; |
392 | } |
393 | case R600InstrInfo::ALU_VEC_102_SCL_221: { |
394 | unsigned Cycles[3] = { 2, 2, 1}; |
395 | return Cycles[Op]; |
396 | } |
397 | default: |
398 | llvm_unreachable("Wrong Swizzle for Trans Slot" ); |
399 | } |
400 | } |
401 | |
402 | /// returns how many MIs (whose inputs are represented by IGSrcs) can be packed |
403 | /// in the same Instruction Group while meeting read port limitations given a |
404 | /// Swz swizzle sequence. |
405 | unsigned R600InstrInfo::isLegalUpTo( |
406 | const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs, |
407 | const std::vector<R600InstrInfo::BankSwizzle> &Swz, |
408 | const std::vector<std::pair<int, unsigned>> &TransSrcs, |
409 | R600InstrInfo::BankSwizzle TransSwz) const { |
410 | int Vector[4][3]; |
411 | memset(s: Vector, c: -1, n: sizeof(Vector)); |
412 | for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) { |
413 | const std::vector<std::pair<int, unsigned>> &Srcs = |
414 | Swizzle(Src: IGSrcs[i], Swz: Swz[i]); |
415 | for (unsigned j = 0; j < 3; j++) { |
416 | const std::pair<int, unsigned> &Src = Srcs[j]; |
417 | if (Src.first < 0 || Src.first == 255) |
418 | continue; |
419 | if (Src.first == GET_REG_INDEX(RI.getEncodingValue(R600::OQAP))) { |
420 | if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 && |
421 | Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) { |
422 | // The value from output queue A (denoted by register OQAP) can |
423 | // only be fetched during the first cycle. |
424 | return false; |
425 | } |
426 | // OQAP does not count towards the normal read port restrictions |
427 | continue; |
428 | } |
429 | if (Vector[Src.second][j] < 0) |
430 | Vector[Src.second][j] = Src.first; |
431 | if (Vector[Src.second][j] != Src.first) |
432 | return i; |
433 | } |
434 | } |
435 | // Now check Trans Alu |
436 | for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) { |
437 | const std::pair<int, unsigned> &Src = TransSrcs[i]; |
438 | unsigned Cycle = getTransSwizzle(Swz: TransSwz, Op: i); |
439 | if (Src.first < 0) |
440 | continue; |
441 | if (Src.first == 255) |
442 | continue; |
443 | if (Vector[Src.second][Cycle] < 0) |
444 | Vector[Src.second][Cycle] = Src.first; |
445 | if (Vector[Src.second][Cycle] != Src.first) |
446 | return IGSrcs.size() - 1; |
447 | } |
448 | return IGSrcs.size(); |
449 | } |
450 | |
451 | /// Given a swizzle sequence SwzCandidate and an index Idx, returns the next |
452 | /// (in lexicographic term) swizzle sequence assuming that all swizzles after |
453 | /// Idx can be skipped |
454 | static bool |
455 | NextPossibleSolution( |
456 | std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate, |
457 | unsigned Idx) { |
458 | assert(Idx < SwzCandidate.size()); |
459 | int ResetIdx = Idx; |
460 | while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210) |
461 | ResetIdx --; |
462 | for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) { |
463 | SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210; |
464 | } |
465 | if (ResetIdx == -1) |
466 | return false; |
467 | int NextSwizzle = SwzCandidate[ResetIdx] + 1; |
468 | SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle; |
469 | return true; |
470 | } |
471 | |
472 | /// Enumerate all possible Swizzle sequence to find one that can meet all |
473 | /// read port requirements. |
474 | bool R600InstrInfo::FindSwizzleForVectorSlot( |
475 | const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs, |
476 | std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate, |
477 | const std::vector<std::pair<int, unsigned>> &TransSrcs, |
478 | R600InstrInfo::BankSwizzle TransSwz) const { |
479 | unsigned ValidUpTo = 0; |
480 | do { |
481 | ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz); |
482 | if (ValidUpTo == IGSrcs.size()) |
483 | return true; |
484 | } while (NextPossibleSolution(SwzCandidate, Idx: ValidUpTo)); |
485 | return false; |
486 | } |
487 | |
488 | /// Instructions in Trans slot can't read gpr at cycle 0 if they also read |
489 | /// a const, and can't read a gpr at cycle 1 if they read 2 const. |
490 | static bool |
491 | isConstCompatible(R600InstrInfo::BankSwizzle TransSwz, |
492 | const std::vector<std::pair<int, unsigned>> &TransOps, |
493 | unsigned ConstCount) { |
494 | // TransALU can't read 3 constants |
495 | if (ConstCount > 2) |
496 | return false; |
497 | for (unsigned i = 0, e = TransOps.size(); i < e; ++i) { |
498 | const std::pair<int, unsigned> &Src = TransOps[i]; |
499 | unsigned Cycle = getTransSwizzle(Swz: TransSwz, Op: i); |
500 | if (Src.first < 0) |
501 | continue; |
502 | if (ConstCount > 0 && Cycle == 0) |
503 | return false; |
504 | if (ConstCount > 1 && Cycle == 1) |
505 | return false; |
506 | } |
507 | return true; |
508 | } |
509 | |
510 | bool |
511 | R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG, |
512 | const DenseMap<unsigned, unsigned> &PV, |
513 | std::vector<BankSwizzle> &ValidSwizzle, |
514 | bool isLastAluTrans) |
515 | const { |
516 | //Todo : support shared src0 - src1 operand |
517 | |
518 | std::vector<std::vector<std::pair<int, unsigned>>> IGSrcs; |
519 | ValidSwizzle.clear(); |
520 | unsigned ConstCount; |
521 | BankSwizzle TransBS = ALU_VEC_012_SCL_210; |
522 | for (MachineInstr *MI : IG) { |
523 | IGSrcs.push_back(ExtractSrcs(*MI, PV, ConstCount)); |
524 | unsigned Op = getOperandIdx(MI->getOpcode(), R600::OpName::bank_swizzle); |
525 | ValidSwizzle.push_back( |
526 | x: (R600InstrInfo::BankSwizzle)MI->getOperand(i: Op).getImm()); |
527 | } |
528 | std::vector<std::pair<int, unsigned>> TransOps; |
529 | if (!isLastAluTrans) |
530 | return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS); |
531 | |
532 | TransOps = std::move(IGSrcs.back()); |
533 | IGSrcs.pop_back(); |
534 | ValidSwizzle.pop_back(); |
535 | |
536 | static const R600InstrInfo::BankSwizzle TransSwz[] = { |
537 | ALU_VEC_012_SCL_210, |
538 | ALU_VEC_021_SCL_122, |
539 | ALU_VEC_120_SCL_212, |
540 | ALU_VEC_102_SCL_221 |
541 | }; |
542 | for (R600InstrInfo::BankSwizzle TransBS : TransSwz) { |
543 | if (!isConstCompatible(TransSwz: TransBS, TransOps, ConstCount)) |
544 | continue; |
545 | bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, |
546 | TransBS); |
547 | if (Result) { |
548 | ValidSwizzle.push_back(x: TransBS); |
549 | return true; |
550 | } |
551 | } |
552 | |
553 | return false; |
554 | } |
555 | |
556 | bool |
557 | R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts) |
558 | const { |
559 | assert (Consts.size() <= 12 && "Too many operands in instructions group" ); |
560 | unsigned Pair1 = 0, Pair2 = 0; |
561 | for (unsigned Const : Consts) { |
562 | unsigned ReadConstHalf = Const & 2; |
563 | unsigned ReadConstIndex = Const & (~3); |
564 | unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf; |
565 | if (!Pair1) { |
566 | Pair1 = ReadHalfConst; |
567 | continue; |
568 | } |
569 | if (Pair1 == ReadHalfConst) |
570 | continue; |
571 | if (!Pair2) { |
572 | Pair2 = ReadHalfConst; |
573 | continue; |
574 | } |
575 | if (Pair2 != ReadHalfConst) |
576 | return false; |
577 | } |
578 | return true; |
579 | } |
580 | |
581 | bool |
582 | R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs) |
583 | const { |
584 | std::vector<unsigned> Consts; |
585 | SmallSet<int64_t, 4> Literals; |
586 | for (MachineInstr *MI : MIs) { |
587 | if (!isALUInstr(Opcode: MI->getOpcode())) |
588 | continue; |
589 | |
590 | for (const auto &Src : getSrcs(MI&: *MI)) { |
591 | if (Src.first->getReg() == R600::ALU_LITERAL_X) |
592 | Literals.insert(V: Src.second); |
593 | if (Literals.size() > 4) |
594 | return false; |
595 | if (Src.first->getReg() == R600::ALU_CONST) |
596 | Consts.push_back(x: Src.second); |
597 | if (R600::R600_KC0RegClass.contains(Src.first->getReg()) || |
598 | R600::R600_KC1RegClass.contains(Src.first->getReg())) { |
599 | unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff; |
600 | unsigned Chan = RI.getHWRegChan(reg: Src.first->getReg()); |
601 | Consts.push_back(x: (Index << 2) | Chan); |
602 | } |
603 | } |
604 | } |
605 | return fitsConstReadLimitations(Consts); |
606 | } |
607 | |
608 | DFAPacketizer * |
609 | R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const { |
610 | const InstrItineraryData *II = STI.getInstrItineraryData(); |
611 | return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II); |
612 | } |
613 | |
614 | static bool |
615 | isPredicateSetter(unsigned Opcode) { |
616 | switch (Opcode) { |
617 | case R600::PRED_X: |
618 | return true; |
619 | default: |
620 | return false; |
621 | } |
622 | } |
623 | |
624 | static MachineInstr * |
625 | findFirstPredicateSetterFrom(MachineBasicBlock &MBB, |
626 | MachineBasicBlock::iterator I) { |
627 | while (I != MBB.begin()) { |
628 | --I; |
629 | MachineInstr &MI = *I; |
630 | if (isPredicateSetter(Opcode: MI.getOpcode())) |
631 | return &MI; |
632 | } |
633 | |
634 | return nullptr; |
635 | } |
636 | |
637 | static |
638 | bool isJump(unsigned Opcode) { |
639 | return Opcode == R600::JUMP || Opcode == R600::JUMP_COND; |
640 | } |
641 | |
642 | static bool isBranch(unsigned Opcode) { |
643 | return Opcode == R600::BRANCH || Opcode == R600::BRANCH_COND_i32 || |
644 | Opcode == R600::BRANCH_COND_f32; |
645 | } |
646 | |
647 | bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB, |
648 | MachineBasicBlock *&TBB, |
649 | MachineBasicBlock *&FBB, |
650 | SmallVectorImpl<MachineOperand> &Cond, |
651 | bool AllowModify) const { |
652 | // Most of the following comes from the ARM implementation of analyzeBranch |
653 | |
654 | // If the block has no terminators, it just falls into the block after it. |
655 | MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
656 | if (I == MBB.end()) |
657 | return false; |
658 | |
659 | // R600::BRANCH* instructions are only available after isel and are not |
660 | // handled |
661 | if (isBranch(Opcode: I->getOpcode())) |
662 | return true; |
663 | if (!isJump(Opcode: I->getOpcode())) { |
664 | return false; |
665 | } |
666 | |
667 | // Remove successive JUMP |
668 | while (I != MBB.begin() && std::prev(I)->getOpcode() == R600::JUMP) { |
669 | MachineBasicBlock::iterator PriorI = std::prev(x: I); |
670 | if (AllowModify) |
671 | I->removeFromParent(); |
672 | I = PriorI; |
673 | } |
674 | MachineInstr &LastInst = *I; |
675 | |
676 | // If there is only one terminator instruction, process it. |
677 | unsigned LastOpc = LastInst.getOpcode(); |
678 | if (I == MBB.begin() || !isJump(Opcode: (--I)->getOpcode())) { |
679 | if (LastOpc == R600::JUMP) { |
680 | TBB = LastInst.getOperand(i: 0).getMBB(); |
681 | return false; |
682 | } else if (LastOpc == R600::JUMP_COND) { |
683 | auto predSet = I; |
684 | while (!isPredicateSetter(Opcode: predSet->getOpcode())) { |
685 | predSet = --I; |
686 | } |
687 | TBB = LastInst.getOperand(i: 0).getMBB(); |
688 | Cond.push_back(Elt: predSet->getOperand(i: 1)); |
689 | Cond.push_back(Elt: predSet->getOperand(i: 2)); |
690 | Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false)); |
691 | return false; |
692 | } |
693 | return true; // Can't handle indirect branch. |
694 | } |
695 | |
696 | // Get the instruction before it if it is a terminator. |
697 | MachineInstr &SecondLastInst = *I; |
698 | unsigned SecondLastOpc = SecondLastInst.getOpcode(); |
699 | |
700 | // If the block ends with a B and a Bcc, handle it. |
701 | if (SecondLastOpc == R600::JUMP_COND && LastOpc == R600::JUMP) { |
702 | auto predSet = --I; |
703 | while (!isPredicateSetter(Opcode: predSet->getOpcode())) { |
704 | predSet = --I; |
705 | } |
706 | TBB = SecondLastInst.getOperand(i: 0).getMBB(); |
707 | FBB = LastInst.getOperand(i: 0).getMBB(); |
708 | Cond.push_back(Elt: predSet->getOperand(i: 1)); |
709 | Cond.push_back(Elt: predSet->getOperand(i: 2)); |
710 | Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false)); |
711 | return false; |
712 | } |
713 | |
714 | // Otherwise, can't handle this. |
715 | return true; |
716 | } |
717 | |
718 | static |
719 | MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) { |
720 | for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend(); |
721 | It != E; ++It) { |
722 | if (It->getOpcode() == R600::CF_ALU || |
723 | It->getOpcode() == R600::CF_ALU_PUSH_BEFORE) |
724 | return It.getReverse(); |
725 | } |
726 | return MBB.end(); |
727 | } |
728 | |
729 | unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB, |
730 | MachineBasicBlock *TBB, |
731 | MachineBasicBlock *FBB, |
732 | ArrayRef<MachineOperand> Cond, |
733 | const DebugLoc &DL, |
734 | int *BytesAdded) const { |
735 | assert(TBB && "insertBranch must not be told to insert a fallthrough" ); |
736 | assert(!BytesAdded && "code size not handled" ); |
737 | |
738 | if (!FBB) { |
739 | if (Cond.empty()) { |
740 | BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(TBB); |
741 | return 1; |
742 | } else { |
743 | MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, I: MBB.end()); |
744 | assert(PredSet && "No previous predicate !" ); |
745 | addFlag(MI&: *PredSet, Operand: 0, MO_FLAG_PUSH); |
746 | PredSet->getOperand(i: 2).setImm(Cond[1].getImm()); |
747 | |
748 | BuildMI(&MBB, DL, get(R600::JUMP_COND)) |
749 | .addMBB(TBB) |
750 | .addReg(R600::PREDICATE_BIT, RegState::Kill); |
751 | MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB); |
752 | if (CfAlu == MBB.end()) |
753 | return 1; |
754 | assert (CfAlu->getOpcode() == R600::CF_ALU); |
755 | CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE)); |
756 | return 1; |
757 | } |
758 | } else { |
759 | MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, I: MBB.end()); |
760 | assert(PredSet && "No previous predicate !" ); |
761 | addFlag(MI&: *PredSet, Operand: 0, MO_FLAG_PUSH); |
762 | PredSet->getOperand(i: 2).setImm(Cond[1].getImm()); |
763 | BuildMI(&MBB, DL, get(R600::JUMP_COND)) |
764 | .addMBB(TBB) |
765 | .addReg(R600::PREDICATE_BIT, RegState::Kill); |
766 | BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(FBB); |
767 | MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB); |
768 | if (CfAlu == MBB.end()) |
769 | return 2; |
770 | assert (CfAlu->getOpcode() == R600::CF_ALU); |
771 | CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE)); |
772 | return 2; |
773 | } |
774 | } |
775 | |
776 | unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB, |
777 | int *BytesRemoved) const { |
778 | assert(!BytesRemoved && "code size not handled" ); |
779 | |
780 | // Note : we leave PRED* instructions there. |
781 | // They may be needed when predicating instructions. |
782 | |
783 | MachineBasicBlock::iterator I = MBB.end(); |
784 | |
785 | if (I == MBB.begin()) { |
786 | return 0; |
787 | } |
788 | --I; |
789 | switch (I->getOpcode()) { |
790 | default: |
791 | return 0; |
792 | case R600::JUMP_COND: { |
793 | MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I); |
794 | clearFlag(MI&: *predSet, Operand: 0, MO_FLAG_PUSH); |
795 | I->eraseFromParent(); |
796 | MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB); |
797 | if (CfAlu == MBB.end()) |
798 | break; |
799 | assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE); |
800 | CfAlu->setDesc(get(R600::CF_ALU)); |
801 | break; |
802 | } |
803 | case R600::JUMP: |
804 | I->eraseFromParent(); |
805 | break; |
806 | } |
807 | I = MBB.end(); |
808 | |
809 | if (I == MBB.begin()) { |
810 | return 1; |
811 | } |
812 | --I; |
813 | switch (I->getOpcode()) { |
814 | // FIXME: only one case?? |
815 | default: |
816 | return 1; |
817 | case R600::JUMP_COND: { |
818 | MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I); |
819 | clearFlag(MI&: *predSet, Operand: 0, MO_FLAG_PUSH); |
820 | I->eraseFromParent(); |
821 | MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB); |
822 | if (CfAlu == MBB.end()) |
823 | break; |
824 | assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE); |
825 | CfAlu->setDesc(get(R600::CF_ALU)); |
826 | break; |
827 | } |
828 | case R600::JUMP: |
829 | I->eraseFromParent(); |
830 | break; |
831 | } |
832 | return 2; |
833 | } |
834 | |
835 | bool R600InstrInfo::isPredicated(const MachineInstr &MI) const { |
836 | int idx = MI.findFirstPredOperandIdx(); |
837 | if (idx < 0) |
838 | return false; |
839 | |
840 | Register Reg = MI.getOperand(i: idx).getReg(); |
841 | switch (Reg) { |
842 | default: return false; |
843 | case R600::PRED_SEL_ONE: |
844 | case R600::PRED_SEL_ZERO: |
845 | case R600::PREDICATE_BIT: |
846 | return true; |
847 | } |
848 | } |
849 | |
850 | bool R600InstrInfo::isPredicable(const MachineInstr &MI) const { |
851 | // XXX: KILL* instructions can be predicated, but they must be the last |
852 | // instruction in a clause, so this means any instructions after them cannot |
853 | // be predicated. Until we have proper support for instruction clauses in the |
854 | // backend, we will mark KILL* instructions as unpredicable. |
855 | |
856 | if (MI.getOpcode() == R600::KILLGT) { |
857 | return false; |
858 | } else if (MI.getOpcode() == R600::CF_ALU) { |
859 | // If the clause start in the middle of MBB then the MBB has more |
860 | // than a single clause, unable to predicate several clauses. |
861 | if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI)) |
862 | return false; |
863 | // TODO: We don't support KC merging atm |
864 | return MI.getOperand(i: 3).getImm() == 0 && MI.getOperand(i: 4).getImm() == 0; |
865 | } else if (isVector(MI)) { |
866 | return false; |
867 | } else { |
868 | return TargetInstrInfo::isPredicable(MI); |
869 | } |
870 | } |
871 | |
872 | bool |
873 | R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB, |
874 | unsigned NumCycles, |
875 | unsigned , |
876 | BranchProbability Probability) const{ |
877 | return true; |
878 | } |
879 | |
880 | bool |
881 | R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB, |
882 | unsigned NumTCycles, |
883 | unsigned , |
884 | MachineBasicBlock &FMBB, |
885 | unsigned NumFCycles, |
886 | unsigned , |
887 | BranchProbability Probability) const { |
888 | return true; |
889 | } |
890 | |
891 | bool |
892 | R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB, |
893 | unsigned NumCycles, |
894 | BranchProbability Probability) |
895 | const { |
896 | return true; |
897 | } |
898 | |
899 | bool |
900 | R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB, |
901 | MachineBasicBlock &FMBB) const { |
902 | return false; |
903 | } |
904 | |
905 | bool |
906 | R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
907 | MachineOperand &MO = Cond[1]; |
908 | switch (MO.getImm()) { |
909 | case R600::PRED_SETE_INT: |
910 | MO.setImm(R600::PRED_SETNE_INT); |
911 | break; |
912 | case R600::PRED_SETNE_INT: |
913 | MO.setImm(R600::PRED_SETE_INT); |
914 | break; |
915 | case R600::PRED_SETE: |
916 | MO.setImm(R600::PRED_SETNE); |
917 | break; |
918 | case R600::PRED_SETNE: |
919 | MO.setImm(R600::PRED_SETE); |
920 | break; |
921 | default: |
922 | return true; |
923 | } |
924 | |
925 | MachineOperand &MO2 = Cond[2]; |
926 | switch (MO2.getReg()) { |
927 | case R600::PRED_SEL_ZERO: |
928 | MO2.setReg(R600::PRED_SEL_ONE); |
929 | break; |
930 | case R600::PRED_SEL_ONE: |
931 | MO2.setReg(R600::PRED_SEL_ZERO); |
932 | break; |
933 | default: |
934 | return true; |
935 | } |
936 | return false; |
937 | } |
938 | |
939 | bool R600InstrInfo::ClobbersPredicate(MachineInstr &MI, |
940 | std::vector<MachineOperand> &Pred, |
941 | bool SkipDead) const { |
942 | return isPredicateSetter(Opcode: MI.getOpcode()); |
943 | } |
944 | |
945 | bool R600InstrInfo::PredicateInstruction(MachineInstr &MI, |
946 | ArrayRef<MachineOperand> Pred) const { |
947 | int PIdx = MI.findFirstPredOperandIdx(); |
948 | |
949 | if (MI.getOpcode() == R600::CF_ALU) { |
950 | MI.getOperand(i: 8).setImm(0); |
951 | return true; |
952 | } |
953 | |
954 | if (MI.getOpcode() == R600::DOT_4) { |
955 | MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_X)) |
956 | .setReg(Pred[2].getReg()); |
957 | MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Y)) |
958 | .setReg(Pred[2].getReg()); |
959 | MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Z)) |
960 | .setReg(Pred[2].getReg()); |
961 | MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_W)) |
962 | .setReg(Pred[2].getReg()); |
963 | MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); |
964 | MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit); |
965 | return true; |
966 | } |
967 | |
968 | if (PIdx != -1) { |
969 | MachineOperand &PMO = MI.getOperand(i: PIdx); |
970 | PMO.setReg(Pred[2].getReg()); |
971 | MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); |
972 | MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit); |
973 | return true; |
974 | } |
975 | |
976 | return false; |
977 | } |
978 | |
979 | unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const { |
980 | return 2; |
981 | } |
982 | |
983 | unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData, |
984 | const MachineInstr &, |
985 | unsigned *PredCost) const { |
986 | if (PredCost) |
987 | *PredCost = 2; |
988 | return 2; |
989 | } |
990 | |
991 | unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex, |
992 | unsigned Channel) const { |
993 | assert(Channel == 0); |
994 | return RegIndex; |
995 | } |
996 | |
997 | bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
998 | switch (MI.getOpcode()) { |
999 | default: { |
1000 | MachineBasicBlock *MBB = MI.getParent(); |
1001 | int OffsetOpIdx = |
1002 | R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::addr); |
1003 | // addr is a custom operand with multiple MI operands, and only the |
1004 | // first MI operand is given a name. |
1005 | int RegOpIdx = OffsetOpIdx + 1; |
1006 | int ChanOpIdx = |
1007 | R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::chan); |
1008 | if (isRegisterLoad(MI)) { |
1009 | int DstOpIdx = |
1010 | R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::dst); |
1011 | unsigned RegIndex = MI.getOperand(i: RegOpIdx).getImm(); |
1012 | unsigned Channel = MI.getOperand(i: ChanOpIdx).getImm(); |
1013 | unsigned Address = calculateIndirectAddress(RegIndex, Channel); |
1014 | Register OffsetReg = MI.getOperand(i: OffsetOpIdx).getReg(); |
1015 | if (OffsetReg == R600::INDIRECT_BASE_ADDR) { |
1016 | buildMovInstr(MBB, I: MI, DstReg: MI.getOperand(i: DstOpIdx).getReg(), |
1017 | SrcReg: getIndirectAddrRegClass()->getRegister(i: Address)); |
1018 | } else { |
1019 | buildIndirectRead(MBB, I: MI, ValueReg: MI.getOperand(i: DstOpIdx).getReg(), Address, |
1020 | OffsetReg); |
1021 | } |
1022 | } else if (isRegisterStore(MI)) { |
1023 | int ValOpIdx = |
1024 | R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::val); |
1025 | unsigned RegIndex = MI.getOperand(i: RegOpIdx).getImm(); |
1026 | unsigned Channel = MI.getOperand(i: ChanOpIdx).getImm(); |
1027 | unsigned Address = calculateIndirectAddress(RegIndex, Channel); |
1028 | Register OffsetReg = MI.getOperand(i: OffsetOpIdx).getReg(); |
1029 | if (OffsetReg == R600::INDIRECT_BASE_ADDR) { |
1030 | buildMovInstr(MBB, I: MI, DstReg: getIndirectAddrRegClass()->getRegister(i: Address), |
1031 | SrcReg: MI.getOperand(i: ValOpIdx).getReg()); |
1032 | } else { |
1033 | buildIndirectWrite(MBB, I: MI, ValueReg: MI.getOperand(i: ValOpIdx).getReg(), |
1034 | Address: calculateIndirectAddress(RegIndex, Channel), |
1035 | OffsetReg); |
1036 | } |
1037 | } else { |
1038 | return false; |
1039 | } |
1040 | |
1041 | MBB->erase(I: MI); |
1042 | return true; |
1043 | } |
1044 | case R600::R600_EXTRACT_ELT_V2: |
1045 | case R600::R600_EXTRACT_ELT_V4: |
1046 | buildIndirectRead(MBB: MI.getParent(), I: MI, ValueReg: MI.getOperand(i: 0).getReg(), |
1047 | Address: RI.getHWRegIndex(Reg: MI.getOperand(i: 1).getReg()), // Address |
1048 | OffsetReg: MI.getOperand(i: 2).getReg(), |
1049 | AddrChan: RI.getHWRegChan(reg: MI.getOperand(i: 1).getReg())); |
1050 | break; |
1051 | case R600::R600_INSERT_ELT_V2: |
1052 | case R600::R600_INSERT_ELT_V4: |
1053 | buildIndirectWrite(MBB: MI.getParent(), I: MI, ValueReg: MI.getOperand(i: 2).getReg(), // Value |
1054 | Address: RI.getHWRegIndex(Reg: MI.getOperand(i: 1).getReg()), // Address |
1055 | OffsetReg: MI.getOperand(i: 3).getReg(), // Offset |
1056 | AddrChan: RI.getHWRegChan(reg: MI.getOperand(i: 1).getReg())); // Channel |
1057 | break; |
1058 | } |
1059 | MI.eraseFromParent(); |
1060 | return true; |
1061 | } |
1062 | |
/// Reserve the physical registers that back indirect (stack-slot) accesses
/// so the register allocator cannot assign them to other values.
void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF,
                                             const R600RegisterInfo &TRI) const {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  // Number of channels each stack slot occupies, per the frame lowering.
  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  // -1 means the function has no indirect accesses: nothing to reserve.
  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      // T registers are vec4: slot Index, channel Chan -> element 4*Index+Chan
      // of the TReg32 class.
      unsigned Reg = R600::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      TRI.reserveRegisterTuples(Reserved, Reg);
    }
  }
}
1082 | |
/// Register class used for indirectly-addressed values: the X channel of
/// the T registers.
const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &R600::R600_TReg32_XRegClass;
}
1086 | |
1087 | MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB, |
1088 | MachineBasicBlock::iterator I, |
1089 | unsigned ValueReg, unsigned Address, |
1090 | unsigned OffsetReg) const { |
1091 | return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, AddrChan: 0); |
1092 | } |
1093 | |
/// Emit the two-instruction sequence for an indirect register write:
///   MOVA_INT AR.x, OffsetReg    -- load the address register (write=0)
///   MOV      AddrReg, ValueReg  -- with dst_rel=1 so the destination is
///                                  addressed relative to AR.x
/// Returns the builder for the final MOV.
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  // Pick the Addr register class matching the requested channel.
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel" );
  case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
                                               R600::AR_X, OffsetReg);
  // MOVA only sets AR.x; suppress its register write.
  setImmOperand(*MOVA, R600::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
                                                    AddrReg, ValueReg)
                                .addReg(R600::AR_X,
                                        RegState::Implicit | RegState::Kill);
  // Mark the destination as relative (indexed by AR.x).
  setImmOperand(*Mov, R600::OpName::dst_rel, 1);
  return Mov;
}
1118 | |
1119 | MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB, |
1120 | MachineBasicBlock::iterator I, |
1121 | unsigned ValueReg, unsigned Address, |
1122 | unsigned OffsetReg) const { |
1123 | return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, AddrChan: 0); |
1124 | } |
1125 | |
/// Emit the two-instruction sequence for an indirect register read:
///   MOVA_INT AR.x, OffsetReg    -- load the address register (write=0)
///   MOV      ValueReg, AddrReg  -- with src0_rel=1 so the source is
///                                  addressed relative to AR.x
/// Returns the builder for the final MOV.
MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  // Pick the Addr register class matching the requested channel.
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel" );
  case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
                                               R600::AR_X,
                                               OffsetReg);
  // MOVA only sets AR.x; suppress its register write.
  setImmOperand(*MOVA, R600::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
                                                    ValueReg,
                                                    AddrReg)
                                .addReg(R600::AR_X,
                                        RegState::Implicit | RegState::Kill);
  // Mark the source as relative (indexed by AR.x).
  setImmOperand(*Mov, R600::OpName::src0_rel, 1);

  return Mov;
}
1152 | |
1153 | int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const { |
1154 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
1155 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
1156 | int Offset = -1; |
1157 | |
1158 | if (MFI.getNumObjects() == 0) { |
1159 | return -1; |
1160 | } |
1161 | |
1162 | if (MRI.livein_empty()) { |
1163 | return 0; |
1164 | } |
1165 | |
1166 | const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass(); |
1167 | for (std::pair<unsigned, unsigned> LI : MRI.liveins()) { |
1168 | Register Reg = LI.first; |
1169 | if (Reg.isVirtual() || !IndirectRC->contains(Reg)) |
1170 | continue; |
1171 | |
1172 | unsigned RegIndex; |
1173 | unsigned RegEnd; |
1174 | for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd; |
1175 | ++RegIndex) { |
1176 | if (IndirectRC->getRegister(i: RegIndex) == (unsigned)Reg) |
1177 | break; |
1178 | } |
1179 | Offset = std::max(a: Offset, b: (int)RegIndex); |
1180 | } |
1181 | |
1182 | return Offset + 1; |
1183 | } |
1184 | |
/// Compute the end of the indirect register range as
/// getIndirectIndexBegin(MF) plus a frame-derived offset, or -1 when
/// indirect addressing is unused or unsupported.
int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  if (MFI.hasVarSizedObjects()) {
    return -1;
  }

  // No stack objects: no indirect accesses.
  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  Register IgnoredFrameReg;
  // NOTE(review): queries frame index -1 — presumably this yields the total
  // size of the indirect area in slots; confirm against R600FrameLowering's
  // getFrameIndexReference implementation.
  Offset = TFL->getFrameIndexReference(MF, FI: -1, FrameReg&: IgnoredFrameReg).getFixed();

  return getIndirectIndexBegin(MF) + Offset;
}
1206 | |
/// Upper bound on the number of ALU instructions emitted per clause.
/// NOTE(review): 115 is below a power-of-two clause size — presumably this
/// leaves headroom for literal constants that share clause slots; confirm
/// against the R600 ISA documentation.
unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}
1210 | |
/// Build an ALU instruction with the full set of R600 "native" operands
/// (write/omod/rel/clamp/neg/abs/sel and friends) filled with default
/// values. A zero Src1Reg selects the unary operand layout; a non-zero
/// Src1Reg additionally emits the update_exec_mask/update_predicate
/// operands and the src1 operand group. The operand order below must match
/// the TableGen instruction definitions exactly.
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(MBBI: I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(Val: 0)     // $update_exec_mask
       .addImm(Val: 0);    // $update_predicate
  }
  MIB.addImm(Val: 1)        // $write
     .addImm(Val: 0)        // $omod
     .addImm(Val: 0)        // $dst_rel
     .addImm(Val: 0)        // $dst_clamp
     .addReg(RegNo: Src0Reg)  // $src0
     .addImm(Val: 0)        // $src0_neg
     .addImm(Val: 0)        // $src0_rel
     .addImm(Val: 0)        // $src0_abs
     .addImm(Val: -1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(RegNo: Src1Reg) // $src1
       .addImm(Val: 0)        // $src1_neg
       .addImm(Val: 0)        // $src1_rel
       .addImm(Val: 0)        // $src1_abs
       .addImm(Val: -1);       // $src1_sel
  }

  //XXX: The r600g finalizer expects this to be 1, once we've moved the
  //scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)        // $last
      .addReg(R600::PRED_SEL_OFF) // $pred_sel
      .addImm(0)         // $literal
      .addImm(0);        // $bank_swizzle

  return MIB;
}
1251 | |
1252 | #define OPERAND_CASE(Label) \ |
1253 | case Label: { \ |
1254 | static const unsigned Ops[] = \ |
1255 | { \ |
1256 | Label##_X, \ |
1257 | Label##_Y, \ |
1258 | Label##_Z, \ |
1259 | Label##_W \ |
1260 | }; \ |
1261 | return Ops[Slot]; \ |
1262 | } |
1263 | |
/// Map a generic per-slot operand name (e.g. src0) to its slot-specific
/// variant (src0_X / _Y / _Z / _W) via the OPERAND_CASE table above.
/// \p Slot must be 0-3 (X, Y, Z, W).
static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(R600::OpName::update_exec_mask)
  OPERAND_CASE(R600::OpName::update_pred)
  OPERAND_CASE(R600::OpName::write)
  OPERAND_CASE(R600::OpName::omod)
  OPERAND_CASE(R600::OpName::dst_rel)
  OPERAND_CASE(R600::OpName::clamp)
  OPERAND_CASE(R600::OpName::src0)
  OPERAND_CASE(R600::OpName::src0_neg)
  OPERAND_CASE(R600::OpName::src0_rel)
  OPERAND_CASE(R600::OpName::src0_abs)
  OPERAND_CASE(R600::OpName::src0_sel)
  OPERAND_CASE(R600::OpName::src1)
  OPERAND_CASE(R600::OpName::src1_neg)
  OPERAND_CASE(R600::OpName::src1_rel)
  OPERAND_CASE(R600::OpName::src1_abs)
  OPERAND_CASE(R600::OpName::src1_sel)
  OPERAND_CASE(R600::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand" );
  }
}
1287 | |
1288 | #undef OPERAND_CASE |
1289 | |
/// Expand one slot (Slot = 0-3 for X/Y/Z/W) of a DOT_4 pseudo into a scalar
/// DOT4 instruction writing \p DstReg, copying the slot's per-operand flags
/// and pred_sel from the vector instruction onto the new one.
MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == R600::DOT_4 && "Not Implemented" );
  // Pre-EG chips use the r600 encoding of DOT4; later ones use the eg form.
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = R600::DOT4_r600;
  else
    Opcode = R600::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  // Pull this slot's source registers out of the vector instruction.
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0Reg: Src0.getReg(), Src1Reg: Src1.getReg());
  // Immediate flag operands to copy from the vector slot to the scalar
  // instruction (everything except the sources, handled above, and
  // pred_sel, handled below).
  static const unsigned  Operands[14] = {
    R600::OpName::update_exec_mask,
    R600::OpName::update_pred,
    R600::OpName::write,
    R600::OpName::omod,
    R600::OpName::dst_rel,
    R600::OpName::clamp,
    R600::OpName::src0_neg,
    R600::OpName::src0_rel,
    R600::OpName::src0_abs,
    R600::OpName::src0_sel,
    R600::OpName::src1_neg,
    R600::OpName::src1_rel,
    R600::OpName::src1_abs,
    R600::OpName::src1_sel,
  };

  // pred_sel is a register operand, so it is copied separately.
  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(R600::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, R600::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned Operand : Operands) {
    MachineOperand &MO = MI->getOperand(
        i: getOperandIdx(Opcode: MI->getOpcode(), Op: getSlotedOps(Op: Operand, Slot)));
    assert (MO.isImm());
    setImmOperand(MI&: *MIB, Op: Operand, Imm: MO.getImm());
  }
  // NOTE(review): operand 20 is zeroed here by raw index — presumably a
  // flag operand (e.g. "last"); confirm against the DOT4 operand layout.
  MIB->getOperand(i: 20).setImm(0);
  return MIB;
}
1337 | |
1338 | MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB, |
1339 | MachineBasicBlock::iterator I, |
1340 | unsigned DstReg, |
1341 | uint64_t Imm) const { |
1342 | MachineInstr *MovImm = buildDefaultInstruction(BB, I, R600::MOV, DstReg, |
1343 | R600::ALU_LITERAL_X); |
1344 | setImmOperand(*MovImm, R600::OpName::literal, Imm); |
1345 | return MovImm; |
1346 | } |
1347 | |
1348 | MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB, |
1349 | MachineBasicBlock::iterator I, |
1350 | unsigned DstReg, unsigned SrcReg) const { |
1351 | return buildDefaultInstruction(*MBB, I, R600::MOV, DstReg, SrcReg); |
1352 | } |
1353 | |
/// Return the MI-operand index of the named operand \p Op for this
/// instruction, or -1 if the instruction lacks it.
int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(Opcode: MI.getOpcode(), Op);
}
1357 | |
/// Opcode-based variant: thin wrapper over the TableGen-generated
/// getNamedOperandIdx.
int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return R600::getNamedOperandIdx(Opcode, Op);
}
1361 | |
1362 | void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op, |
1363 | int64_t Imm) const { |
1364 | int Idx = getOperandIdx(MI, Op); |
1365 | assert(Idx != -1 && "Operand not supported for this instruction." ); |
1366 | assert(MI.getOperand(Idx).isImm()); |
1367 | MI.getOperand(i: Idx).setImm(Imm); |
1368 | } |
1369 | |
1370 | //===----------------------------------------------------------------------===// |
1371 | // Instruction flag getters/setters |
1372 | //===----------------------------------------------------------------------===// |
1373 | |
/// Return the operand of \p MI that holds the given flag. For instructions
/// with native operands (HAS_NATIVE_OPERANDS), the flag maps to a dedicated
/// named operand (clamp/write/last/srcN_neg/srcN_abs); with the default
/// Flag == 0 the packed flag-bitfield operand is returned instead.
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(MI, R600::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      // Masking is expressed through the write operand.
      FlagIndex = getOperandIdx(MI, R600::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(MI, R600::OpName::last);
      break;
    case MO_FLAG_NEG:
      // Negation flags are per-source operand.
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, R600::OpName::src0_neg);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, R600::OpName::src1_neg);
        break;
      case 2:
        FlagIndex = getOperandIdx(MI, R600::OpName::src2_neg);
        break;
      }
      break;

    case MO_FLAG_ABS:
      // OP3 instructions have no absolute-value modifier operands.
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions." );
      (void)IsOP3;
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, R600::OpName::src0_abs);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, R600::OpName::src1_abs);
        break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction" );
  } else {
    // Legacy encoding: the flag bitfield operand index is stored in TSFlags.
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction" );
  }

  MachineOperand &FlagOp = MI.getOperand(i: FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}
1438 | |
/// Set \p Flag on operand \p Operand of \p MI. With native flag operands,
/// the matching operand is set to 1 — except the NOT_LAST and MASK
/// pseudo-flags, which are expressed by clearing the LAST / WRITE operand
/// respectively. Otherwise the flag is OR'd into the packed flag bitfield.
void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  // Flag 0 means "no flag": nothing to do.
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, SrcIdx: Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      // "Not last" is represented by clearing the LAST operand.
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      // Masking a write is represented by clearing the WRITE operand
      // (getFlagOp maps MO_FLAG_MASK to the write operand).
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    // Legacy encoding: NUM_MO_FLAGS bits of flags per operand, all packed
    // into one immediate operand.
    MachineOperand &FlagOp = getFlagOp(MI, SrcIdx: Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}
1459 | |
/// Clear \p Flag on operand \p Operand of \p MI: zero the dedicated flag
/// operand for native-operand instructions, or mask the flag's bits out of
/// the packed flag bitfield otherwise.
void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, SrcIdx: Operand, Flag);
    FlagOp.setImm(0);
  } else {
    // Legacy encoding: NUM_MO_FLAGS bits of flags per operand in one
    // immediate; clear only this operand's copy of the flag.
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}
1473 | |