1//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
/// This file implements the MachineIRBuilder class.
10//===----------------------------------------------------------------------===//
11#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12#include "llvm/CodeGen/MachineFunction.h"
13#include "llvm/CodeGen/MachineInstr.h"
14#include "llvm/CodeGen/MachineInstrBuilder.h"
15#include "llvm/CodeGen/MachineRegisterInfo.h"
16#include "llvm/CodeGen/TargetInstrInfo.h"
17#include "llvm/CodeGen/TargetLowering.h"
18#include "llvm/CodeGen/TargetOpcodes.h"
19#include "llvm/CodeGen/TargetSubtargetInfo.h"
20#include "llvm/IR/DebugInfoMetadata.h"
21
22using namespace llvm;
23
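// Re-target the builder at a new function: cache MRI/TII from MF and clear
// the block, insertion point, debug location, PC-sections/MMRA metadata and
// the change observer, so stale state from a previous function cannot leak.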
24void MachineIRBuilder::setMF(MachineFunction &MF) {
25 State.MF = &MF;
26 State.MBB = nullptr;
27 State.MRI = &MF.getRegInfo();
28 State.TII = MF.getSubtarget().getInstrInfo();
29 State.DL = DebugLoc();
30 State.PCSections = nullptr;
31 State.MMRA = nullptr;
32 State.II = MachineBasicBlock::iterator();
33 State.Observer = nullptr;
34}
35
36//------------------------------------------------------------------------------
37// Build instruction variants.
38//------------------------------------------------------------------------------
39
40MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
41 return BuildMI(MF&: getMF(), MIMD: {getDL(), getPCSections(), getMMRAMetadata()},
42 MCID: getTII().get(Opcode));
43}
44
45MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
46 getMBB().insert(I: getInsertPt(), MI: MIB);
47 recordInsertion(InsertedInstr: MIB);
48 return MIB;
49}
50
51MachineInstrBuilder
52MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
53 const MDNode *Expr) {
54 assert(isa<DILocalVariable>(Variable) && "not a variable");
55 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
56 assert(
57 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
58 "Expected inlined-at fields to agree");
59 return insertInstr(MIB: BuildMI(MF&: getMF(), DL: getDL(),
60 MCID: getTII().get(Opcode: TargetOpcode::DBG_VALUE),
61 /*IsIndirect*/ false, Reg, Variable, Expr));
62}
63
64MachineInstrBuilder
65MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
66 const MDNode *Expr) {
67 assert(isa<DILocalVariable>(Variable) && "not a variable");
68 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
69 assert(
70 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
71 "Expected inlined-at fields to agree");
72 return insertInstr(MIB: BuildMI(MF&: getMF(), DL: getDL(),
73 MCID: getTII().get(Opcode: TargetOpcode::DBG_VALUE),
74 /*IsIndirect*/ true, Reg, Variable, Expr));
75}
76
77MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
78 const MDNode *Variable,
79 const MDNode *Expr) {
80 assert(isa<DILocalVariable>(Variable) && "not a variable");
81 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
82 assert(
83 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
84 "Expected inlined-at fields to agree");
85 return insertInstr(MIB: buildInstrNoInsert(Opcode: TargetOpcode::DBG_VALUE)
86 .addFrameIndex(Idx: FI)
87 .addImm(Val: 0)
88 .addMetadata(MD: Variable)
89 .addMetadata(MD: Expr));
90}
91
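// Emit a DBG_VALUE describing a constant. An inttoptr ConstantExpr is looked
// through to its integer operand; integers wider than 64 bits are encoded as
// a CImm, narrower ones as a plain immediate, floats as an FPImm and null
// pointers as 0. Any other constant is dropped by emitting $noreg.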
92MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
93 const MDNode *Variable,
94 const MDNode *Expr) {
95 assert(isa<DILocalVariable>(Variable) && "not a variable");
96 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
97 assert(
98 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
99 "Expected inlined-at fields to agree");
100 auto MIB = buildInstrNoInsert(Opcode: TargetOpcode::DBG_VALUE);
101
102 auto *NumericConstant = [&] () -> const Constant* {
103 if (const auto *CE = dyn_cast<ConstantExpr>(Val: &C))
104 if (CE->getOpcode() == Instruction::IntToPtr)
105 return CE->getOperand(i_nocapture: 0);
106 return &C;
107 }();
108
109 if (auto *CI = dyn_cast<ConstantInt>(Val: NumericConstant)) {
110 if (CI->getBitWidth() > 64)
111 MIB.addCImm(Val: CI);
112 else
113 MIB.addImm(Val: CI->getZExtValue());
114 } else if (auto *CFP = dyn_cast<ConstantFP>(Val: NumericConstant)) {
115 MIB.addFPImm(Val: CFP);
116 } else if (isa<ConstantPointerNull>(Val: NumericConstant)) {
117 MIB.addImm(Val: 0);
118 } else {
119 // Insert $noreg if we didn't find a usable constant and had to drop it.
120 MIB.addReg(RegNo: Register());
121 }
122
123 MIB.addImm(Val: 0).addMetadata(MD: Variable).addMetadata(MD: Expr);
124 return insertInstr(MIB);
125}
126
127MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
128 assert(isa<DILabel>(Label) && "not a label");
129 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
130 "Expected inlined-at fields to agree");
131 auto MIB = buildInstr(Opcode: TargetOpcode::DBG_LABEL);
132
133 return MIB.addMetadata(MD: Label);
134}
135
136MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
137 const SrcOp &Size,
138 Align Alignment) {
139 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
140 auto MIB = buildInstr(Opcode: TargetOpcode::G_DYN_STACKALLOC);
141 Res.addDefToMIB(MRI&: *getMRI(), MIB);
142 Size.addSrcToMIB(MIB);
143 MIB.addImm(Val: Alignment.value());
144 return MIB;
145}
146
147MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
148 int Idx) {
149 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
150 auto MIB = buildInstr(Opcode: TargetOpcode::G_FRAME_INDEX);
151 Res.addDefToMIB(MRI&: *getMRI(), MIB);
152 MIB.addFrameIndex(Idx);
153 return MIB;
154}
155
156MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
157 const GlobalValue *GV) {
158 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
159 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
160 GV->getType()->getAddressSpace() &&
161 "address space mismatch");
162
163 auto MIB = buildInstr(Opcode: TargetOpcode::G_GLOBAL_VALUE);
164 Res.addDefToMIB(MRI&: *getMRI(), MIB);
165 MIB.addGlobalAddress(GV);
166 return MIB;
167}
168
169MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
170 unsigned Idx) {
171 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
172 auto MIB = buildInstr(Opcode: TargetOpcode::G_CONSTANT_POOL);
173 Res.addDefToMIB(MRI&: *getMRI(), MIB);
174 MIB.addConstantPoolIndex(Idx);
175 return MIB;
176}
177
178MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
179 unsigned JTI) {
180 return buildInstr(Opc: TargetOpcode::G_JUMP_TABLE, DstOps: {PtrTy}, SrcOps: {})
181 .addJumpTableIndex(Idx: JTI);
182}
183
184void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
185 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
186 assert((Res == Op0) && "type mismatch");
187}
188
189void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
190 const LLT Op1) {
191 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
192 assert((Res == Op0 && Res == Op1) && "type mismatch");
193}
194
195void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
196 const LLT Op1) {
197 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
198 assert((Res == Op0) && "type mismatch");
199}
200
201MachineInstrBuilder
202MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
203 const SrcOp &Op1, std::optional<unsigned> Flags) {
204 assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
205 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
206 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
207
208 return buildInstr(Opc: TargetOpcode::G_PTR_ADD, DstOps: {Res}, SrcOps: {Op0, Op1}, Flags);
209}
210
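// Like buildPtrAdd, but with a constant offset and an out-parameter result.
// Res must be an invalid (unset) register on entry; when Value is 0 no
// instruction is emitted, Res simply aliases Op0 and std::nullopt is
// returned. Otherwise a fresh vreg of Op0's type receives the G_PTR_ADD.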
211std::optional<MachineInstrBuilder>
212MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
213 const LLT ValueTy, uint64_t Value) {
214 assert(Res == 0 && "Res is a result argument");
215 assert(ValueTy.isScalar() && "invalid offset type");
216
217 if (Value == 0) {
218 Res = Op0;
219 return std::nullopt;
220 }
221
222 Res = getMRI()->createGenericVirtualRegister(Ty: getMRI()->getType(Reg: Op0));
223 auto Cst = buildConstant(Res: ValueTy, Val: Value);
224 return buildPtrAdd(Res, Op0, Op1: Cst.getReg(Idx: 0));
225}
226
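// Clear the NumBits least significant bits of Op0 via G_PTRMASK, i.e. align
// the pointer down to a 2^NumBits byte boundary. The mask constant has its
// low NumBits zeroed and all higher bits set.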
227MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
228 const SrcOp &Op0,
229 uint32_t NumBits) {
230 LLT PtrTy = Res.getLLTTy(MRI: *getMRI());
231 LLT MaskTy = LLT::scalar(SizeInBits: PtrTy.getSizeInBits());
232 Register MaskReg = getMRI()->createGenericVirtualRegister(Ty: MaskTy);
233 buildConstant(Res: MaskReg, Val: maskTrailingZeros<uint64_t>(N: NumBits));
234 return buildPtrMask(Res, Op0, Op1: MaskReg);
235}
236
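// Widen Op0 to the (strictly larger) vector type of Res by unmerging its
// elements (or taking Op0 itself when it is a scalar), appending
// G_IMPLICIT_DEF elements until Res' element count is reached, and merging
// the result back together.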
237MachineInstrBuilder
238MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
239 const SrcOp &Op0) {
240 LLT ResTy = Res.getLLTTy(MRI: *getMRI());
241 LLT Op0Ty = Op0.getLLTTy(MRI: *getMRI());
242
243 assert(ResTy.isVector() && "Res non vector type");
244
245 SmallVector<Register, 8> Regs;
246 if (Op0Ty.isVector()) {
247 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
248 "Different vector element types");
249 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
250 "Op0 has more elements");
251 auto Unmerge = buildUnmerge(Res: Op0Ty.getElementType(), Op: Op0);
252
253 for (auto Op : Unmerge.getInstr()->defs())
254 Regs.push_back(Elt: Op.getReg());
255 } else {
256 assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
257 "Op0 has more size");
258 Regs.push_back(Elt: Op0.getReg());
259 }
260 Register Undef =
261 buildUndef(Res: Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(Idx: 0);
262 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
263 for (unsigned i = 0; i < NumberOfPadElts; ++i)
264 Regs.push_back(Elt: Undef);
265 return buildMergeLikeInstr(Res, Ops: Regs);
266}
267
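// Narrow the vector Op0 to Res by unmerging it and keeping only the leading
// elements: a single copied element when Res is scalar, otherwise the first
// Res.getNumElements() elements re-merged into the smaller vector.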
268MachineInstrBuilder
269MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
270 const SrcOp &Op0) {
271 LLT ResTy = Res.getLLTTy(MRI: *getMRI());
272 LLT Op0Ty = Op0.getLLTTy(MRI: *getMRI());
273
274 assert(Op0Ty.isVector() && "Non vector type");
275 assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
276 (ResTy.isVector() &&
277 (ResTy.getElementType() == Op0Ty.getElementType()))) &&
278 "Different vector element types");
279 assert(
280 (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
281 "Op0 has fewer elements");
282
283 auto Unmerge = buildUnmerge(Res: Op0Ty.getElementType(), Op: Op0);
284 if (ResTy.isScalar())
285 return buildCopy(Res, Op: Unmerge.getReg(Idx: 0));
286 SmallVector<Register, 8> Regs;
287 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
288 Regs.push_back(Elt: Unmerge.getReg(Idx: i));
289 return buildMergeLikeInstr(Res, Ops: Regs);
290}
291
292MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
293 return buildInstr(Opcode: TargetOpcode::G_BR).addMBB(MBB: &Dest);
294}
295
296MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
297 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
298 return buildInstr(Opcode: TargetOpcode::G_BRINDIRECT).addUse(RegNo: Tgt);
299}
300
301MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
302 unsigned JTI,
303 Register IndexReg) {
304 assert(getMRI()->getType(TablePtr).isPointer() &&
305 "Table reg must be a pointer");
306 return buildInstr(Opcode: TargetOpcode::G_BRJT)
307 .addUse(RegNo: TablePtr)
308 .addJumpTableIndex(Idx: JTI)
309 .addUse(RegNo: IndexReg);
310}
311
312MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
313 const SrcOp &Op) {
314 return buildInstr(Opc: TargetOpcode::COPY, DstOps: Res, SrcOps: Op);
315}
316
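// Materialize an integer constant. The element width of Res must match the
// width of Val; for a fixed vector Res the scalar G_CONSTANT is splatted with
// a G_BUILD_VECTOR, and scalable vectors are asserted against. A minimal
// usage sketch, assuming a MachineIRBuilder `MIRBuilder` with a valid
// insertion point (names are illustrative only):
//   auto C = MIRBuilder.buildConstant(LLT::scalar(32), 42);          // scalar
//   auto V = MIRBuilder.buildConstant(LLT::fixed_vector(4, 32), 1);  // splat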
317MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
318 const ConstantInt &Val) {
319 LLT Ty = Res.getLLTTy(MRI: *getMRI());
320 LLT EltTy = Ty.getScalarType();
321 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
322 "creating constant with the wrong size");
323
324 assert(!Ty.isScalableVector() &&
325 "unexpected scalable vector in buildConstant");
326
327 if (Ty.isFixedVector()) {
328 auto Const = buildInstr(Opcode: TargetOpcode::G_CONSTANT)
329 .addDef(RegNo: getMRI()->createGenericVirtualRegister(Ty: EltTy))
330 .addCImm(Val: &Val);
331 return buildSplatBuildVector(Res, Src: Const);
332 }
333
334 auto Const = buildInstr(Opcode: TargetOpcode::G_CONSTANT);
335 Const->setDebugLoc(DebugLoc());
336 Res.addDefToMIB(MRI&: *getMRI(), MIB&: Const);
337 Const.addCImm(Val: &Val);
338 return Const;
339}
340
341MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
342 int64_t Val) {
343 auto IntN = IntegerType::get(C&: getMF().getFunction().getContext(),
344 NumBits: Res.getLLTTy(MRI: *getMRI()).getScalarSizeInBits());
345 ConstantInt *CI = ConstantInt::get(Ty: IntN, V: Val, IsSigned: true);
346 return buildConstant(Res, Val: *CI);
347}
348
349MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
350 const ConstantFP &Val) {
351 LLT Ty = Res.getLLTTy(MRI: *getMRI());
352 LLT EltTy = Ty.getScalarType();
353
354 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
355 == EltTy.getSizeInBits() &&
356 "creating fconstant with the wrong size");
357
358 assert(!Ty.isPointer() && "invalid operand type");
359
360 assert(!Ty.isScalableVector() &&
361 "unexpected scalable vector in buildFConstant");
362
363 if (Ty.isFixedVector()) {
364 auto Const = buildInstr(Opcode: TargetOpcode::G_FCONSTANT)
365 .addDef(RegNo: getMRI()->createGenericVirtualRegister(Ty: EltTy))
366 .addFPImm(Val: &Val);
367
368 return buildSplatBuildVector(Res, Src: Const);
369 }
370
371 auto Const = buildInstr(Opcode: TargetOpcode::G_FCONSTANT);
372 Const->setDebugLoc(DebugLoc());
373 Res.addDefToMIB(MRI&: *getMRI(), MIB&: Const);
374 Const.addFPImm(Val: &Val);
375 return Const;
376}
377
378MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
379 const APInt &Val) {
380 ConstantInt *CI = ConstantInt::get(Context&: getMF().getFunction().getContext(), V: Val);
381 return buildConstant(Res, Val: *CI);
382}
383
384MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
385 double Val) {
386 LLT DstTy = Res.getLLTTy(MRI: *getMRI());
387 auto &Ctx = getMF().getFunction().getContext();
388 auto *CFP =
389 ConstantFP::get(Context&: Ctx, V: getAPFloatFromSize(Val, Size: DstTy.getScalarSizeInBits()));
390 return buildFConstant(Res, Val: *CFP);
391}
392
393MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
394 const APFloat &Val) {
395 auto &Ctx = getMF().getFunction().getContext();
396 auto *CFP = ConstantFP::get(Context&: Ctx, V: Val);
397 return buildFConstant(Res, Val: *CFP);
398}
399
400MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
401 MachineBasicBlock &Dest) {
402 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
403
404 auto MIB = buildInstr(Opcode: TargetOpcode::G_BRCOND);
405 Tst.addSrcToMIB(MIB);
406 MIB.addMBB(MBB: &Dest);
407 return MIB;
408}
409
410MachineInstrBuilder
411MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
412 MachinePointerInfo PtrInfo, Align Alignment,
413 MachineMemOperand::Flags MMOFlags,
414 const AAMDNodes &AAInfo) {
415 MMOFlags |= MachineMemOperand::MOLoad;
416 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
417
418 LLT Ty = Dst.getLLTTy(MRI: *getMRI());
419 MachineMemOperand *MMO =
420 getMF().getMachineMemOperand(PtrInfo, f: MMOFlags, MemTy: Ty, base_alignment: Alignment, AAInfo);
421 return buildLoad(Res: Dst, Addr, MMO&: *MMO);
422}
423
424MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
425 const DstOp &Res,
426 const SrcOp &Addr,
427 MachineMemOperand &MMO) {
428 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
429 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
430
431 auto MIB = buildInstr(Opcode);
432 Res.addDefToMIB(MRI&: *getMRI(), MIB);
433 Addr.addSrcToMIB(MIB);
434 MIB.addMemOperand(MMO: &MMO);
435 return MIB;
436}
437
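// Load from BasePtr + Offset, cloning BaseMMO at the adjusted offset with the
// destination's type. A zero offset reuses BasePtr directly (this may still
// change the memory type relative to BaseMMO); otherwise a G_CONSTANT and
// G_PTR_ADD compute the address. Sketch, assuming the caller already has a
// base pointer `Base` and a memory operand `*BaseMMO` (illustrative names):
//   MIRBuilder.buildLoadFromOffset(LLT::scalar(32), Base, *BaseMMO,
//                                  /*Offset=*/4);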
438MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
439 const DstOp &Dst, const SrcOp &BasePtr,
440 MachineMemOperand &BaseMMO, int64_t Offset) {
441 LLT LoadTy = Dst.getLLTTy(MRI: *getMRI());
442 MachineMemOperand *OffsetMMO =
443 getMF().getMachineMemOperand(MMO: &BaseMMO, Offset, Ty: LoadTy);
444
445 if (Offset == 0) // This may be a size or type changing load.
446 return buildLoad(Res: Dst, Addr: BasePtr, MMO&: *OffsetMMO);
447
448 LLT PtrTy = BasePtr.getLLTTy(MRI: *getMRI());
449 LLT OffsetTy = LLT::scalar(SizeInBits: PtrTy.getSizeInBits());
450 auto ConstOffset = buildConstant(Res: OffsetTy, Val: Offset);
451 auto Ptr = buildPtrAdd(Res: PtrTy, Op0: BasePtr, Op1: ConstOffset);
452 return buildLoad(Res: Dst, Addr: Ptr, MMO&: *OffsetMMO);
453}
454
455MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
456 const SrcOp &Addr,
457 MachineMemOperand &MMO) {
458 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
459 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
460
461 auto MIB = buildInstr(Opcode: TargetOpcode::G_STORE);
462 Val.addSrcToMIB(MIB);
463 Addr.addSrcToMIB(MIB);
464 MIB.addMemOperand(MMO: &MMO);
465 return MIB;
466}
467
468MachineInstrBuilder
469MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
470 MachinePointerInfo PtrInfo, Align Alignment,
471 MachineMemOperand::Flags MMOFlags,
472 const AAMDNodes &AAInfo) {
473 MMOFlags |= MachineMemOperand::MOStore;
474 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
475
476 LLT Ty = Val.getLLTTy(MRI: *getMRI());
477 MachineMemOperand *MMO =
478 getMF().getMachineMemOperand(PtrInfo, f: MMOFlags, MemTy: Ty, base_alignment: Alignment, AAInfo);
479 return buildStore(Val, Addr, MMO&: *MMO);
480}
481
482MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
483 const SrcOp &Op) {
484 return buildInstr(Opc: TargetOpcode::G_ANYEXT, DstOps: Res, SrcOps: Op);
485}
486
487MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
488 const SrcOp &Op) {
489 return buildInstr(Opc: TargetOpcode::G_SEXT, DstOps: Res, SrcOps: Op);
490}
491
492MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
493 const SrcOp &Op) {
494 return buildInstr(Opc: TargetOpcode::G_ZEXT, DstOps: Res, SrcOps: Op);
495}
496
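// Pick the extension opcode that preserves the target's boolean encoding:
// G_SEXT for zero-or-negative-one booleans, G_ZEXT for zero-or-one booleans,
// and G_ANYEXT when the high bits are undefined.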
497unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
498 const auto *TLI = getMF().getSubtarget().getTargetLowering();
499 switch (TLI->getBooleanContents(isVec: IsVec, isFloat: IsFP)) {
500 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
501 return TargetOpcode::G_SEXT;
502 case TargetLoweringBase::ZeroOrOneBooleanContent:
503 return TargetOpcode::G_ZEXT;
504 default:
505 return TargetOpcode::G_ANYEXT;
506 }
507}
508
509MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
510 const SrcOp &Op,
511 bool IsFP) {
512 unsigned ExtOp = getBoolExtOp(IsVec: getMRI()->getType(Reg: Op.getReg()).isVector(), IsFP);
513 return buildInstr(Opc: ExtOp, DstOps: Res, SrcOps: Op);
514}
515
516MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
517 const SrcOp &Op,
518 bool IsVector,
519 bool IsFP) {
520 const auto *TLI = getMF().getSubtarget().getTargetLowering();
521 switch (TLI->getBooleanContents(isVec: IsVector, isFloat: IsFP)) {
522 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
523 return buildSExtInReg(Res, Op, ImmOp: 1);
524 case TargetLoweringBase::ZeroOrOneBooleanContent:
525 return buildZExtInReg(Res, Op, ImmOp: 1);
526 case TargetLoweringBase::UndefinedBooleanContent:
527 return buildCopy(Res, Op);
528 }
529
530 llvm_unreachable("unexpected BooleanContent");
531}
532
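// Convert Op to the type of Res using whichever operation fits: the requested
// extension when Res is wider, G_TRUNC when it is narrower, and a plain COPY
// when the sizes match (in which case the types must be identical).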
533MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
534 const DstOp &Res,
535 const SrcOp &Op) {
536 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
537 TargetOpcode::G_SEXT == ExtOpc) &&
538 "Expecting Extending Opc");
539 assert(Res.getLLTTy(*getMRI()).isScalar() ||
540 Res.getLLTTy(*getMRI()).isVector());
541 assert(Res.getLLTTy(*getMRI()).isScalar() ==
542 Op.getLLTTy(*getMRI()).isScalar());
543
544 unsigned Opcode = TargetOpcode::COPY;
545 if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() >
546 Op.getLLTTy(MRI: *getMRI()).getSizeInBits())
547 Opcode = ExtOpc;
548 else if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() <
549 Op.getLLTTy(MRI: *getMRI()).getSizeInBits())
550 Opcode = TargetOpcode::G_TRUNC;
551 else
552 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
553
554 return buildInstr(Opc: Opcode, DstOps: Res, SrcOps: Op);
555}
556
557MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
558 const SrcOp &Op) {
559 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_SEXT, Res, Op);
560}
561
562MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
563 const SrcOp &Op) {
564 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_ZEXT, Res, Op);
565}
566
567MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
568 const SrcOp &Op) {
569 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_ANYEXT, Res, Op);
570}
571
572MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
573 const SrcOp &Op,
574 int64_t ImmOp) {
575 LLT ResTy = Res.getLLTTy(MRI: *getMRI());
576 auto Mask = buildConstant(
577 Res: ResTy, Val: APInt::getLowBitsSet(numBits: ResTy.getScalarSizeInBits(), loBitsSet: ImmOp));
578 return buildAnd(Dst: Res, Src0: Op, Src1: Mask);
579}
580
581MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
582 const SrcOp &Src) {
583 LLT SrcTy = Src.getLLTTy(MRI: *getMRI());
584 LLT DstTy = Dst.getLLTTy(MRI: *getMRI());
585 if (SrcTy == DstTy)
586 return buildCopy(Res: Dst, Op: Src);
587
588 unsigned Opcode;
589 if (SrcTy.isPointer() && DstTy.isScalar())
590 Opcode = TargetOpcode::G_PTRTOINT;
591 else if (DstTy.isPointer() && SrcTy.isScalar())
592 Opcode = TargetOpcode::G_INTTOPTR;
593 else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
595 Opcode = TargetOpcode::G_BITCAST;
596 }
597
598 return buildInstr(Opc: Opcode, DstOps: Dst, SrcOps: Src);
599}
600
601MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
602 const SrcOp &Src,
603 uint64_t Index) {
604 LLT SrcTy = Src.getLLTTy(MRI: *getMRI());
605 LLT DstTy = Dst.getLLTTy(MRI: *getMRI());
606
607#ifndef NDEBUG
608 assert(SrcTy.isValid() && "invalid operand type");
609 assert(DstTy.isValid() && "invalid operand type");
610 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
611 "extracting off end of register");
612#endif
613
614 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
615 assert(Index == 0 && "insertion past the end of a register");
616 return buildCast(Dst, Src);
617 }
618
619 auto Extract = buildInstr(Opcode: TargetOpcode::G_EXTRACT);
620 Dst.addDefToMIB(MRI&: *getMRI(), MIB&: Extract);
621 Src.addSrcToMIB(MIB&: Extract);
622 Extract.addImm(Val: Index);
623 return Extract;
624}
625
626MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
627 return buildInstr(Opc: TargetOpcode::G_IMPLICIT_DEF, DstOps: {Res}, SrcOps: {});
628}
629
630MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
631 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
634 // sufficiently large SmallVector to not go through the heap.
635 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
636 assert(TmpVec.size() > 1);
637 return buildInstr(Opc: TargetOpcode::G_MERGE_VALUES, DstOps: Res, SrcOps: TmpVec);
638}
639
640MachineInstrBuilder
641MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
642 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
645 // sufficiently large SmallVector to not go through the heap.
646 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
647 assert(TmpVec.size() > 1);
648 return buildInstr(Opc: getOpcodeForMerge(DstOp: Res, SrcOps: TmpVec), DstOps: Res, SrcOps: TmpVec);
649}
650
651MachineInstrBuilder
652MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
653 std::initializer_list<SrcOp> Ops) {
654 assert(Ops.size() > 1);
655 return buildInstr(Opc: getOpcodeForMerge(DstOp: Res, SrcOps: Ops), DstOps: Res, SrcOps: Ops);
656}
657
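// Choose the "merge-like" opcode for the given operands: vector sources into
// a vector destination use G_CONCAT_VECTORS, scalar sources into a vector use
// G_BUILD_VECTOR, and everything else falls back to G_MERGE_VALUES.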
658unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
659 ArrayRef<SrcOp> SrcOps) const {
660 if (DstOp.getLLTTy(MRI: *getMRI()).isVector()) {
661 if (SrcOps[0].getLLTTy(MRI: *getMRI()).isVector())
662 return TargetOpcode::G_CONCAT_VECTORS;
663 return TargetOpcode::G_BUILD_VECTOR;
664 }
665
666 return TargetOpcode::G_MERGE_VALUES;
667}
668
669MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
670 const SrcOp &Op) {
671 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
672 // we need some temporary storage for the DstOp objects. Here we use a
673 // sufficiently large SmallVector to not go through the heap.
674 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
675 assert(TmpVec.size() > 1);
676 return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
677}
678
679MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
680 const SrcOp &Op) {
681 unsigned NumReg = Op.getLLTTy(MRI: *getMRI()).getSizeInBits() / Res.getSizeInBits();
682 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
683 return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
684}
685
686MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
687 const SrcOp &Op) {
688 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
689 // we need some temporary storage for the DstOp objects. Here we use a
690 // sufficiently large SmallVector to not go through the heap.
691 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
692 assert(TmpVec.size() > 1);
693 return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
694}
695
696MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
697 ArrayRef<Register> Ops) {
698 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
700 // sufficiently large SmallVector to not go through the heap.
701 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
702 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
703}
704
705MachineInstrBuilder
706MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
707 ArrayRef<APInt> Ops) {
708 SmallVector<SrcOp> TmpVec;
709 TmpVec.reserve(N: Ops.size());
710 LLT EltTy = Res.getLLTTy(MRI: *getMRI()).getElementType();
711 for (const auto &Op : Ops)
712 TmpVec.push_back(Elt: buildConstant(Res: EltTy, Val: Op));
713 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
714}
715
716MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
717 const SrcOp &Src) {
718 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(MRI: *getMRI()).getNumElements(), Src);
719 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
720}
721
722MachineInstrBuilder
723MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
724 ArrayRef<Register> Ops) {
725 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
727 // sufficiently large SmallVector to not go through the heap.
728 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
729 if (TmpVec[0].getLLTTy(MRI: *getMRI()).getSizeInBits() ==
730 Res.getLLTTy(MRI: *getMRI()).getElementType().getSizeInBits())
731 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
732 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR_TRUNC, DstOps: Res, SrcOps: TmpVec);
733}
734
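// Broadcast a scalar into every lane of Res: insert Src into element 0 of an
// undef vector, then shuffle it against undef with an all-zero mask.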
735MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
736 const SrcOp &Src) {
737 LLT DstTy = Res.getLLTTy(MRI: *getMRI());
738 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
739 "Expected Src to match Dst elt ty");
740 auto UndefVec = buildUndef(Res: DstTy);
741 auto Zero = buildConstant(Res: LLT::scalar(SizeInBits: 64), Val: 0);
742 auto InsElt = buildInsertVectorElement(Res: DstTy, Val: UndefVec, Elt: Src, Idx: Zero);
743 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
744 return buildShuffleVector(Res: DstTy, Src1: InsElt, Src2: UndefVec, Mask: ZeroMask);
745}
746
747MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
748 const SrcOp &Src) {
749 assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
750 "Expected Src to match Dst elt ty");
751 return buildInstr(Opc: TargetOpcode::G_SPLAT_VECTOR, DstOps: Res, SrcOps: Src);
752}
753
754MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
755 const SrcOp &Src1,
756 const SrcOp &Src2,
757 ArrayRef<int> Mask) {
758 LLT DstTy = Res.getLLTTy(MRI: *getMRI());
759 LLT Src1Ty = Src1.getLLTTy(MRI: *getMRI());
760 LLT Src2Ty = Src2.getLLTTy(MRI: *getMRI());
761 assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
762 Mask.size());
763 assert(DstTy.getElementType() == Src1Ty.getElementType() &&
764 DstTy.getElementType() == Src2Ty.getElementType());
765 (void)DstTy;
766 (void)Src1Ty;
767 (void)Src2Ty;
768 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
769 return buildInstr(Opc: TargetOpcode::G_SHUFFLE_VECTOR, DstOps: {Res}, SrcOps: {Src1, Src2})
770 .addShuffleMask(Val: MaskAlloc);
771}
772
773MachineInstrBuilder
774MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
775 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
777 // sufficiently large SmallVector to not go through the heap.
778 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
779 return buildInstr(Opc: TargetOpcode::G_CONCAT_VECTORS, DstOps: Res, SrcOps: TmpVec);
780}
781
782MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
783 const SrcOp &Src,
784 const SrcOp &Op,
785 unsigned Index) {
786 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
787 Res.getLLTTy(*getMRI()).getSizeInBits() &&
788 "insertion past the end of a register");
789
790 if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() ==
791 Op.getLLTTy(MRI: *getMRI()).getSizeInBits()) {
792 return buildCast(Dst: Res, Src: Op);
793 }
794
795 return buildInstr(Opc: TargetOpcode::G_INSERT, DstOps: Res, SrcOps: {Src, Op, uint64_t(Index)});
796}
797
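// Build a G_VSCALE whose immediate is MinElts, materialized as a ConstantInt
// of the destination's scalar width.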
798MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
799 unsigned MinElts) {
800
801 auto IntN = IntegerType::get(C&: getMF().getFunction().getContext(),
802 NumBits: Res.getLLTTy(MRI: *getMRI()).getScalarSizeInBits());
803 ConstantInt *CI = ConstantInt::get(Ty: IntN, V: MinElts);
804 return buildVScale(Res, MinElts: *CI);
805}
806
807MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
808 const ConstantInt &MinElts) {
809 auto VScale = buildInstr(Opcode: TargetOpcode::G_VSCALE);
810 VScale->setDebugLoc(DebugLoc());
811 Res.addDefToMIB(MRI&: *getMRI(), MIB&: VScale);
812 VScale.addCImm(Val: &MinElts);
813 return VScale;
814}
815
816MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
817 const APInt &MinElts) {
818 ConstantInt *CI =
819 ConstantInt::get(Context&: getMF().getFunction().getContext(), V: MinElts);
820 return buildVScale(Res, MinElts: *CI);
821}
822
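// Select the generic intrinsic opcode from the side-effect/convergence flags:
// both set -> G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS, side effects only ->
// G_INTRINSIC_W_SIDE_EFFECTS, convergent only -> G_INTRINSIC_CONVERGENT,
// neither -> G_INTRINSIC.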
823static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
824 if (HasSideEffects && IsConvergent)
825 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
826 if (HasSideEffects)
827 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
828 if (IsConvergent)
829 return TargetOpcode::G_INTRINSIC_CONVERGENT;
830 return TargetOpcode::G_INTRINSIC;
831}
832
833MachineInstrBuilder
834MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
835 ArrayRef<Register> ResultRegs,
836 bool HasSideEffects, bool isConvergent) {
837 auto MIB = buildInstr(Opcode: getIntrinsicOpcode(HasSideEffects, IsConvergent: isConvergent));
838 for (unsigned ResultReg : ResultRegs)
839 MIB.addDef(RegNo: ResultReg);
840 MIB.addIntrinsicID(ID);
841 return MIB;
842}
843
844MachineInstrBuilder
845MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
846 ArrayRef<Register> ResultRegs) {
847 auto Attrs = Intrinsic::getAttributes(C&: getContext(), id: ID);
848 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
849 bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
850 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
851}
852
853MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
854 ArrayRef<DstOp> Results,
855 bool HasSideEffects,
856 bool isConvergent) {
857 auto MIB = buildInstr(Opcode: getIntrinsicOpcode(HasSideEffects, IsConvergent: isConvergent));
858 for (DstOp Result : Results)
859 Result.addDefToMIB(MRI&: *getMRI(), MIB);
860 MIB.addIntrinsicID(ID);
861 return MIB;
862}
863
864MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
865 ArrayRef<DstOp> Results) {
866 auto Attrs = Intrinsic::getAttributes(C&: getContext(), id: ID);
867 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
868 bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
869 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
870}
871
872MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
873 const SrcOp &Op) {
874 return buildInstr(Opc: TargetOpcode::G_TRUNC, DstOps: Res, SrcOps: Op);
875}
876
877MachineInstrBuilder
878MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
879 std::optional<unsigned> Flags) {
880 return buildInstr(Opc: TargetOpcode::G_FPTRUNC, DstOps: Res, SrcOps: Op, Flags);
881}
882
883MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
884 const DstOp &Res,
885 const SrcOp &Op0,
886 const SrcOp &Op1) {
887 return buildInstr(Opc: TargetOpcode::G_ICMP, DstOps: Res, SrcOps: {Pred, Op0, Op1});
888}
889
890MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
891 const DstOp &Res,
892 const SrcOp &Op0,
893 const SrcOp &Op1,
894 std::optional<unsigned> Flags) {
895
896 return buildInstr(Opc: TargetOpcode::G_FCMP, DstOps: Res, SrcOps: {Pred, Op0, Op1}, Flags);
897}
898
899MachineInstrBuilder
900MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
901 const SrcOp &Op0, const SrcOp &Op1,
902 std::optional<unsigned> Flags) {
903
904 return buildInstr(Opc: TargetOpcode::G_SELECT, DstOps: {Res}, SrcOps: {Tst, Op0, Op1}, Flags);
905}
906
907MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
908 const SrcOp &Src0,
909 const SrcOp &Src1,
910 unsigned Idx) {
911 return buildInstr(Opc: TargetOpcode::G_INSERT_SUBVECTOR, DstOps: Res,
912 SrcOps: {Src0, Src1, uint64_t(Idx)});
913}
914
MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}
921
922MachineInstrBuilder
923MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
924 const SrcOp &Elt, const SrcOp &Idx) {
925 return buildInstr(Opc: TargetOpcode::G_INSERT_VECTOR_ELT, DstOps: Res, SrcOps: {Val, Elt, Idx});
926}
927
928MachineInstrBuilder
929MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
930 const SrcOp &Idx) {
931 return buildInstr(Opc: TargetOpcode::G_EXTRACT_VECTOR_ELT, DstOps: Res, SrcOps: {Val, Idx});
932}
933
934MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
935 const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
936 const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
937#ifndef NDEBUG
938 LLT OldValResTy = OldValRes.getLLTTy(MRI: *getMRI());
939 LLT SuccessResTy = SuccessRes.getLLTTy(MRI: *getMRI());
940 LLT AddrTy = Addr.getLLTTy(MRI: *getMRI());
941 LLT CmpValTy = CmpVal.getLLTTy(MRI: *getMRI());
942 LLT NewValTy = NewVal.getLLTTy(MRI: *getMRI());
943 assert(OldValResTy.isScalar() && "invalid operand type");
944 assert(SuccessResTy.isScalar() && "invalid operand type");
945 assert(AddrTy.isPointer() && "invalid operand type");
946 assert(CmpValTy.isValid() && "invalid operand type");
947 assert(NewValTy.isValid() && "invalid operand type");
948 assert(OldValResTy == CmpValTy && "type mismatch");
949 assert(OldValResTy == NewValTy && "type mismatch");
950#endif
951
952 auto MIB = buildInstr(Opcode: TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
953 OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
954 SuccessRes.addDefToMIB(MRI&: *getMRI(), MIB);
955 Addr.addSrcToMIB(MIB);
956 CmpVal.addSrcToMIB(MIB);
957 NewVal.addSrcToMIB(MIB);
958 MIB.addMemOperand(MMO: &MMO);
959 return MIB;
960}
961
962MachineInstrBuilder
963MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
964 const SrcOp &CmpVal, const SrcOp &NewVal,
965 MachineMemOperand &MMO) {
966#ifndef NDEBUG
967 LLT OldValResTy = OldValRes.getLLTTy(MRI: *getMRI());
968 LLT AddrTy = Addr.getLLTTy(MRI: *getMRI());
969 LLT CmpValTy = CmpVal.getLLTTy(MRI: *getMRI());
970 LLT NewValTy = NewVal.getLLTTy(MRI: *getMRI());
971 assert(OldValResTy.isScalar() && "invalid operand type");
972 assert(AddrTy.isPointer() && "invalid operand type");
973 assert(CmpValTy.isValid() && "invalid operand type");
974 assert(NewValTy.isValid() && "invalid operand type");
975 assert(OldValResTy == CmpValTy && "type mismatch");
976 assert(OldValResTy == NewValTy && "type mismatch");
977#endif
978
979 auto MIB = buildInstr(Opcode: TargetOpcode::G_ATOMIC_CMPXCHG);
980 OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
981 Addr.addSrcToMIB(MIB);
982 CmpVal.addSrcToMIB(MIB);
983 NewVal.addSrcToMIB(MIB);
984 MIB.addMemOperand(MMO: &MMO);
985 return MIB;
986}
987
988MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
989 unsigned Opcode, const DstOp &OldValRes,
990 const SrcOp &Addr, const SrcOp &Val,
991 MachineMemOperand &MMO) {
992
993#ifndef NDEBUG
994 LLT OldValResTy = OldValRes.getLLTTy(MRI: *getMRI());
995 LLT AddrTy = Addr.getLLTTy(MRI: *getMRI());
996 LLT ValTy = Val.getLLTTy(MRI: *getMRI());
997 assert(OldValResTy.isScalar() && "invalid operand type");
998 assert(AddrTy.isPointer() && "invalid operand type");
999 assert(ValTy.isValid() && "invalid operand type");
1000 assert(OldValResTy == ValTy && "type mismatch");
1001 assert(MMO.isAtomic() && "not atomic mem operand");
1002#endif
1003
1004 auto MIB = buildInstr(Opcode);
1005 OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
1006 Addr.addSrcToMIB(MIB);
1007 Val.addSrcToMIB(MIB);
1008 MIB.addMemOperand(MMO: &MMO);
1009 return MIB;
1010}
1011
1012MachineInstrBuilder
1013MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
1014 Register Val, MachineMemOperand &MMO) {
1015 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1016 MMO);
1017}
1018MachineInstrBuilder
1019MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
1020 Register Val, MachineMemOperand &MMO) {
1021 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1022 MMO);
1023}
1024MachineInstrBuilder
1025MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
1026 Register Val, MachineMemOperand &MMO) {
1027 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1028 MMO);
1029}
1030MachineInstrBuilder
1031MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
1032 Register Val, MachineMemOperand &MMO) {
1033 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1034 MMO);
1035}
1036MachineInstrBuilder
1037MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
1038 Register Val, MachineMemOperand &MMO) {
1039 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1040 MMO);
1041}
1042MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
1043 Register Addr,
1044 Register Val,
1045 MachineMemOperand &MMO) {
1046 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1047 MMO);
1048}
1049MachineInstrBuilder
1050MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
1051 Register Val, MachineMemOperand &MMO) {
1052 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1053 MMO);
1054}
1055MachineInstrBuilder
1056MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
1057 Register Val, MachineMemOperand &MMO) {
1058 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1059 MMO);
1060}
1061MachineInstrBuilder
1062MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
1063 Register Val, MachineMemOperand &MMO) {
1064 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1065 MMO);
1066}
1067MachineInstrBuilder
1068MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
1069 Register Val, MachineMemOperand &MMO) {
1070 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1071 MMO);
1072}
1073MachineInstrBuilder
1074MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
1075 Register Val, MachineMemOperand &MMO) {
1076 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1077 MMO);
1078}
1079
1080MachineInstrBuilder
1081MachineIRBuilder::buildAtomicRMWFAdd(
1082 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1083 MachineMemOperand &MMO) {
1084 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1085 MMO);
1086}
1087
1088MachineInstrBuilder
1089MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1090 MachineMemOperand &MMO) {
1091 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1092 MMO);
1093}
1094
1095MachineInstrBuilder
1096MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
1097 const SrcOp &Val, MachineMemOperand &MMO) {
1098 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1099 MMO);
1100}
1101
1102MachineInstrBuilder
1103MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
1104 const SrcOp &Val, MachineMemOperand &MMO) {
1105 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1106 MMO);
1107}
1108
1109MachineInstrBuilder
1110MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1111 return buildInstr(Opcode: TargetOpcode::G_FENCE)
1112 .addImm(Val: Ordering)
1113 .addImm(Val: Scope);
1114}
1115
1116MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
1117 unsigned RW,
1118 unsigned Locality,
1119 unsigned CacheType,
1120 MachineMemOperand &MMO) {
1121 auto MIB = buildInstr(Opcode: TargetOpcode::G_PREFETCH);
1122 Addr.addSrcToMIB(MIB);
1123 MIB.addImm(Val: RW).addImm(Val: Locality).addImm(Val: CacheType);
1124 MIB.addMemOperand(MMO: &MMO);
1125 return MIB;
1126}
1127
1128MachineInstrBuilder
1129MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
1130#ifndef NDEBUG
1131 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1132#endif
1133
1134 return buildInstr(Opcode: TargetOpcode::G_BLOCK_ADDR).addDef(RegNo: Res).addBlockAddress(BA);
1135}
1136
1137void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1138 bool IsExtend) {
1139#ifndef NDEBUG
1140 if (DstTy.isVector()) {
1141 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1142 assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
1143 "different number of elements in a trunc/ext");
1144 } else
1145 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1146
1147 if (IsExtend)
1148 assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1149 "invalid narrowing extend");
1150 else
1151 assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1152 "invalid widening trunc");
1153#endif
1154}
1155
1156void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1157 const LLT Op0Ty, const LLT Op1Ty) {
1158#ifndef NDEBUG
1159 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1160 "invalid operand type");
1161 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1162 if (ResTy.isScalar() || ResTy.isPointer())
1163 assert(TstTy.isScalar() && "type mismatch");
1164 else
1165 assert((TstTy.isScalar() ||
1166 (TstTy.isVector() &&
1167 TstTy.getElementCount() == Op0Ty.getElementCount())) &&
1168 "type mismatch");
1169#endif
1170}
1171
1172MachineInstrBuilder
1173MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
1174 ArrayRef<SrcOp> SrcOps,
1175 std::optional<unsigned> Flags) {
1176 switch (Opc) {
1177 default:
1178 break;
1179 case TargetOpcode::G_SELECT: {
1180 assert(DstOps.size() == 1 && "Invalid select");
1181 assert(SrcOps.size() == 3 && "Invalid select");
1182 validateSelectOp(
1183 ResTy: DstOps[0].getLLTTy(MRI: *getMRI()), TstTy: SrcOps[0].getLLTTy(MRI: *getMRI()),
1184 Op0Ty: SrcOps[1].getLLTTy(MRI: *getMRI()), Op1Ty: SrcOps[2].getLLTTy(MRI: *getMRI()));
1185 break;
1186 }
1187 case TargetOpcode::G_FNEG:
1188 case TargetOpcode::G_ABS:
1189 // All these are unary ops.
1190 assert(DstOps.size() == 1 && "Invalid Dst");
1191 assert(SrcOps.size() == 1 && "Invalid Srcs");
1192 validateUnaryOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
1193 Op0: SrcOps[0].getLLTTy(MRI: *getMRI()));
1194 break;
1195 case TargetOpcode::G_ADD:
1196 case TargetOpcode::G_AND:
1197 case TargetOpcode::G_MUL:
1198 case TargetOpcode::G_OR:
1199 case TargetOpcode::G_SUB:
1200 case TargetOpcode::G_XOR:
1201 case TargetOpcode::G_UDIV:
1202 case TargetOpcode::G_SDIV:
1203 case TargetOpcode::G_UREM:
1204 case TargetOpcode::G_SREM:
1205 case TargetOpcode::G_SMIN:
1206 case TargetOpcode::G_SMAX:
1207 case TargetOpcode::G_UMIN:
1208 case TargetOpcode::G_UMAX:
1209 case TargetOpcode::G_UADDSAT:
1210 case TargetOpcode::G_SADDSAT:
1211 case TargetOpcode::G_USUBSAT:
1212 case TargetOpcode::G_SSUBSAT: {
1213 // All these are binary ops.
1214 assert(DstOps.size() == 1 && "Invalid Dst");
1215 assert(SrcOps.size() == 2 && "Invalid Srcs");
1216 validateBinaryOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
1217 Op0: SrcOps[0].getLLTTy(MRI: *getMRI()),
1218 Op1: SrcOps[1].getLLTTy(MRI: *getMRI()));
1219 break;
1220 }
1221 case TargetOpcode::G_SHL:
1222 case TargetOpcode::G_ASHR:
1223 case TargetOpcode::G_LSHR:
1224 case TargetOpcode::G_USHLSAT:
1225 case TargetOpcode::G_SSHLSAT: {
1226 assert(DstOps.size() == 1 && "Invalid Dst");
1227 assert(SrcOps.size() == 2 && "Invalid Srcs");
1228 validateShiftOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
1229 Op0: SrcOps[0].getLLTTy(MRI: *getMRI()),
1230 Op1: SrcOps[1].getLLTTy(MRI: *getMRI()));
1231 break;
1232 }
1233 case TargetOpcode::G_SEXT:
1234 case TargetOpcode::G_ZEXT:
1235 case TargetOpcode::G_ANYEXT:
1236 assert(DstOps.size() == 1 && "Invalid Dst");
1237 assert(SrcOps.size() == 1 && "Invalid Srcs");
1238 validateTruncExt(DstTy: DstOps[0].getLLTTy(MRI: *getMRI()),
1239 SrcTy: SrcOps[0].getLLTTy(MRI: *getMRI()), IsExtend: true);
1240 break;
1241 case TargetOpcode::G_TRUNC:
1242 case TargetOpcode::G_FPTRUNC: {
1243 assert(DstOps.size() == 1 && "Invalid Dst");
1244 assert(SrcOps.size() == 1 && "Invalid Srcs");
1245 validateTruncExt(DstTy: DstOps[0].getLLTTy(MRI: *getMRI()),
1246 SrcTy: SrcOps[0].getLLTTy(MRI: *getMRI()), IsExtend: false);
1247 break;
1248 }
1249 case TargetOpcode::G_BITCAST: {
1250 assert(DstOps.size() == 1 && "Invalid Dst");
1251 assert(SrcOps.size() == 1 && "Invalid Srcs");
1252 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1253 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1254 break;
1255 }
1256 case TargetOpcode::COPY:
1257 assert(DstOps.size() == 1 && "Invalid Dst");
1258 // If the caller wants to add a subreg source it has to be done separately
1259 // so we may not have any SrcOps at this point yet.
1260 break;
1261 case TargetOpcode::G_FCMP:
1262 case TargetOpcode::G_ICMP: {
1263 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1264 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1265 // For F/ICMP, the first src operand is the predicate, followed by
1266 // the two comparands.
1267 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1268 "Expecting predicate");
1269 assert([&]() -> bool {
1270 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1271 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1272 : CmpInst::isFPPredicate(Pred);
1273 }() && "Invalid predicate");
1274 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1275 "Type mismatch");
1276 assert([&]() -> bool {
1277 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1278 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1279 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1280 return DstTy.isScalar();
1281 else
1282 return DstTy.isVector() &&
1283 DstTy.getElementCount() == Op0Ty.getElementCount();
1284 }() && "Type Mismatch");
1285 break;
1286 }
1287 case TargetOpcode::G_UNMERGE_VALUES: {
1288 assert(!DstOps.empty() && "Invalid trivial sequence");
1289 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1290 assert(llvm::all_of(DstOps,
1291 [&, this](const DstOp &Op) {
1292 return Op.getLLTTy(*getMRI()) ==
1293 DstOps[0].getLLTTy(*getMRI());
1294 }) &&
1295 "type mismatch in output list");
1296 assert((TypeSize::ScalarTy)DstOps.size() *
1297 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1298 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1299 "input operands do not cover output register");
1300 break;
1301 }
1302 case TargetOpcode::G_MERGE_VALUES: {
1303 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1304 assert(DstOps.size() == 1 && "Invalid Dst");
1305 assert(llvm::all_of(SrcOps,
1306 [&, this](const SrcOp &Op) {
1307 return Op.getLLTTy(*getMRI()) ==
1308 SrcOps[0].getLLTTy(*getMRI());
1309 }) &&
1310 "type mismatch in input list");
1311 assert((TypeSize::ScalarTy)SrcOps.size() *
1312 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1313 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1314 "input operands do not cover output register");
1315 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1316 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1317 break;
1318 }
1319 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1320 assert(DstOps.size() == 1 && "Invalid Dst size");
1321 assert(SrcOps.size() == 2 && "Invalid Src size");
1322 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1323 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1324 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1325 "Invalid operand type");
1326 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1327 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1328 DstOps[0].getLLTTy(*getMRI()) &&
1329 "Type mismatch");
1330 break;
1331 }
1332 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1333 assert(DstOps.size() == 1 && "Invalid dst size");
1334 assert(SrcOps.size() == 3 && "Invalid src size");
1335 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1336 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1337 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1338 SrcOps[1].getLLTTy(*getMRI()) &&
1339 "Type mismatch");
1340 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1341 assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
1342 SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
1343 "Type mismatch");
1344 break;
1345 }
1346 case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1349 assert(DstOps.size() == 1 && "Invalid DstOps");
1350 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1351 "Res type must be a vector");
1352 assert(llvm::all_of(SrcOps,
1353 [&, this](const SrcOp &Op) {
1354 return Op.getLLTTy(*getMRI()) ==
1355 SrcOps[0].getLLTTy(*getMRI());
1356 }) &&
1357 "type mismatch in input list");
1358 assert((TypeSize::ScalarTy)SrcOps.size() *
1359 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1360 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1361 "input scalars do not exactly cover the output vector register");
1362 break;
1363 }
1364 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1367 assert(DstOps.size() == 1 && "Invalid DstOps");
1368 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1369 "Res type must be a vector");
1370 assert(llvm::all_of(SrcOps,
1371 [&, this](const SrcOp &Op) {
1372 return Op.getLLTTy(*getMRI()) ==
1373 SrcOps[0].getLLTTy(*getMRI());
1374 }) &&
1375 "type mismatch in input list");
1376 break;
1377 }
1378 case TargetOpcode::G_CONCAT_VECTORS: {
1379 assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1382 assert(llvm::all_of(SrcOps,
1383 [&, this](const SrcOp &Op) {
1384 return (Op.getLLTTy(*getMRI()).isVector() &&
1385 Op.getLLTTy(*getMRI()) ==
1386 SrcOps[0].getLLTTy(*getMRI()));
1387 }) &&
1388 "type mismatch in input list");
1389 assert((TypeSize::ScalarTy)SrcOps.size() *
1390 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1391 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1392 "input vectors do not exactly cover the output vector register");
1393 break;
1394 }
1395 case TargetOpcode::G_UADDE: {
1396 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1397 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1398 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1399 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1400 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1401 "Invalid operand");
1402 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1403 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1404 "type mismatch");
1405 break;
1406 }
1407 }
1408
1409 auto MIB = buildInstr(Opcode: Opc);
1410 for (const DstOp &Op : DstOps)
1411 Op.addDefToMIB(MRI&: *getMRI(), MIB);
1412 for (const SrcOp &Op : SrcOps)
1413 Op.addSrcToMIB(MIB);
1414 if (Flags)
1415 MIB->setFlags(*Flags);
1416 return MIB;
1417}
1418
