//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
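//
// For example, one illustrative way to enable it from the command line:
//   llc -verify-machineinstrs foo.ll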
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConvergenceVerifier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

  struct MachineVerifier {
    MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

    MachineVerifier(const char *b, LiveVariables *LiveVars,
                    LiveIntervals *LiveInts, LiveStacks *LiveStks,
                    SlotIndexes *Indexes)
        : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
          Indexes(Indexes) {}

    unsigned verify(const MachineFunction &MF);

    Pass *const PASS = nullptr;
    const char *Banner;
    const MachineFunction *MF = nullptr;
    const TargetMachine *TM = nullptr;
    const TargetInstrInfo *TII = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    const MachineRegisterInfo *MRI = nullptr;
    const RegisterBankInfo *RBI = nullptr;

    unsigned foundErrors = 0;

    // Avoid querying the MachineFunctionProperties for each operand.
    bool isFunctionRegBankSelected = false;
    bool isFunctionSelected = false;
    bool isFunctionTracksDebugUserValues = false;

    using RegVector = SmallVector<Register, 16>;
    using RegMaskVector = SmallVector<const uint32_t *, 4>;
    using RegSet = DenseSet<Register>;
    using RegMap = DenseMap<Register, const MachineInstr *>;
    using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

    const MachineInstr *FirstNonPHI = nullptr;
    const MachineInstr *FirstTerminator = nullptr;
    BlockSet FunctionBlocks;

    BitVector regsReserved;
    RegSet regsLive;
    RegVector regsDefined, regsDead, regsKilled;
    RegMaskVector regMasks;

    SlotIndex lastIndex;

    // Add Reg and any sub-registers to RV
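    // (For illustration: on x86, adding EAX would also record its
    // sub-registers AX, AH, and AL.)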
    void addRegWithSubRegs(RegVector &RV, Register Reg) {
      RV.push_back(Reg);
      if (Reg.isPhysical())
        append_range(RV, TRI->subregs(Reg.asMCReg()));
    }

    struct BBInfo {
      // Is this MBB reachable from the MF entry point?
      bool reachable = false;

      // Vregs that must be live in because they are used without being
      // defined. Map value is the user. vregsLiveIn doesn't include regs
      // that are only used by PHI nodes.
      RegMap vregsLiveIn;

      // Regs killed in MBB. They may be defined again, and will then be in both
      // regsKilled and regsLiveOut.
      RegSet regsKilled;

      // Regs defined in MBB and live out. Note that vregs passing through may
      // be live out without being mentioned here.
      RegSet regsLiveOut;

      // Vregs that pass through MBB untouched. This set is disjoint from
      // regsKilled and regsLiveOut.
      RegSet vregsPassed;

      // Vregs that must pass through MBB because they are needed by a successor
      // block. This set is disjoint from regsLiveOut.
      RegSet vregsRequired;

      // Set versions of block's predecessor and successor lists.
      BlockSet Preds, Succs;

      BBInfo() = default;

      // Add register to vregsRequired if it belongs there. Return true if
      // anything changed.
      bool addRequired(Register Reg) {
        if (!Reg.isVirtual())
          return false;
        if (regsLiveOut.count(Reg))
          return false;
        return vregsRequired.insert(Reg).second;
      }

      // Same for a full set.
      bool addRequired(const RegSet &RS) {
        bool Changed = false;
        for (Register Reg : RS)
          Changed |= addRequired(Reg);
        return Changed;
      }

      // Same for a full map.
      bool addRequired(const RegMap &RM) {
        bool Changed = false;
        for (const auto &I : RM)
          Changed |= addRequired(I.first);
        return Changed;
      }

      // Live-out registers are either in regsLiveOut or vregsPassed.
      bool isLiveOut(Register Reg) const {
        return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
      }
    };
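
    // A sketch of how these sets are used: calcRegsPassed() propagates
    // live-out vregs forward through the CFG to compute vregsPassed, and
    // calcRegsRequired() propagates required vregs backwards from uses;
    // the verifier then cross-checks both against each block's defs, kills,
    // and live-ins.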
    // Extra register info per MBB.
    DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

    bool isReserved(Register Reg) {
      return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
    }

    bool isAllocatable(Register Reg) const {
      return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
             !regsReserved.test(Reg.id());
    }

    // Analysis information if available
    LiveVariables *LiveVars = nullptr;
    LiveIntervals *LiveInts = nullptr;
    LiveStacks *LiveStks = nullptr;
    SlotIndexes *Indexes = nullptr;

    // This is calculated only when trying to verify convergence control tokens.
    // Similar to the LLVM IR verifier, we calculate this locally instead of
    // relying on the pass manager.
    MachineDomTree DT;

    void visitMachineFunctionBefore();
    void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
    void visitMachineBundleBefore(const MachineInstr *MI);

    /// Verify that all of \p MI's virtual register operands are scalars.
    /// \returns True if all virtual register operands are scalar. False
    /// otherwise.
    bool verifyAllRegOpsScalar(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI);
    bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

    bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
    bool verifyGIntrinsicConvergence(const MachineInstr *MI);
    void verifyPreISelGenericInstruction(const MachineInstr *MI);

    void visitMachineInstrBefore(const MachineInstr *MI);
    void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
    void visitMachineBundleAfter(const MachineInstr *MI);
    void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
    void visitMachineFunctionAfter();

    void report(const char *msg, const MachineFunction *MF);
    void report(const char *msg, const MachineBasicBlock *MBB);
    void report(const char *msg, const MachineInstr *MI);
    void report(const char *msg, const MachineOperand *MO, unsigned MONum,
                LLT MOVRegType = LLT{});
    void report(const Twine &Msg, const MachineInstr *MI);

    void report_context(const LiveInterval &LI) const;
    void report_context(const LiveRange &LR, Register VRegUnit,
                        LaneBitmask LaneMask) const;
    void report_context(const LiveRange::Segment &S) const;
    void report_context(const VNInfo &VNI) const;
    void report_context(SlotIndex Pos) const;
    void report_context(MCPhysReg PhysReg) const;
    void report_context_liverange(const LiveRange &LR) const;
    void report_context_lanemask(LaneBitmask LaneMask) const;
    void report_context_vreg(Register VReg) const;
    void report_context_vreg_regunit(Register VRegOrUnit) const;

    void verifyInlineAsm(const MachineInstr *MI);

    void checkLiveness(const MachineOperand *MO, unsigned MONum);
    void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                            SlotIndex UseIdx, const LiveRange &LR,
                            Register VRegOrUnit,
                            LaneBitmask LaneMask = LaneBitmask::getNone());
    void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                            SlotIndex DefIdx, const LiveRange &LR,
                            Register VRegOrUnit, bool SubRangeCheck = false,
                            LaneBitmask LaneMask = LaneBitmask::getNone());

    void markReachable(const MachineBasicBlock *MBB);
    void calcRegsPassed();
    void checkPHIOps(const MachineBasicBlock &MBB);

    void calcRegsRequired();
    void verifyLiveVariables();
    void verifyLiveIntervals();
    void verifyLiveInterval(const LiveInterval&);
    void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                              LaneBitmask);
    void verifyLiveRangeSegment(const LiveRange &,
                                const LiveRange::const_iterator I, Register,
                                LaneBitmask);
    void verifyLiveRange(const LiveRange &, Register,
                         LaneBitmask LaneMask = LaneBitmask::getNone());

    void verifyStackFrame();

    void verifySlotIndexes() const;
    void verifyProperties(const MachineFunction &MF);
  };

  struct MachineVerifierPass : public MachineFunctionPass {
    static char ID; // Pass ID, replacement for typeid

    const std::string Banner;

    MachineVerifierPass(std::string banner = std::string())
        : MachineFunctionPass(ID), Banner(std::move(banner)) {
      initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addUsedIfAvailable<LiveStacks>();
      AU.addUsedIfAvailable<LiveVariables>();
      AU.addUsedIfAvailable<SlotIndexes>();
      AU.addUsedIfAvailable<LiveIntervals>();
      AU.setPreservesAll();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Skip functions that have known verification problems.
      // FIXME: Remove this mechanism when all problematic passes have been
      // fixed.
      if (MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::FailsVerification))
        return false;

      unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
      if (FoundErrors)
        report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
      return false;
    }
  };

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}

void llvm::verifyMachineFunction(const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
    const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors =
      MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
  return FoundErrors == 0;
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;
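    // Bundles are chained via the BundledPred/BundledSucc flags. For
    // illustration (conceptual layout, not literal MIR syntax):
    //   BUNDLE ...   <- header: BundledSucc set
    //     MI0        <- BundledPred and BundledSucc set
    //     MI1        <- BundledPred set (last instruction in the bundle)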

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
           << ';' << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      errs() << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, and
  // Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
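
  // Schematically (illustrative, not literal syntax), the operand list is:
  //   INLINEASM &"asm string", <extra flags>,
  //             <group0 flag>, <ops...>, <group1 flag>, <ops...>,
  //             [optional metadata], [implicit register operands...]
  // where each flag word encodes its group's kind and how many register
  // operands follow it.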

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
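///
/// For example, pairing s32 with <2 x s32> fails the all-vector-or-all-scalar
/// check, and <2 x s32> with <4 x s32> fails the element-count check.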
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
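    // For illustration (hypothetical MIR):
    //   %y:_(s32) = G_ASSERT_ZEXT %x(s32), 8
    // asserts that only the low 8 bits of %x may be nonzero.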
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
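    // For illustration (hypothetical MIR):
    //   %v:_(s32) = G_ZEXTLOAD %p(p0) :: (load (s8))
    // an extending load whose memory type must be narrower than the result.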
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
                                ValTy.getSizeInBits()))
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (TypeSize::isKnownGT(MMO.getSize().getValue(),
                                ValTy.getSizeInBytes()))
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
                                MMO.getSize().getValue()))
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
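    // For illustration (hypothetical MIR):
    //   %sum:_(p0) = G_PTR_ADD %base(p0), %off(s64)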
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.isPointerOrPointerVector())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.isPointerOrPointerVector())
      report("gep offset operand must not be a pointer", MI);

    if (PtrTy.isPointerOrPointerVector()) {
      const DataLayout &DL = MF->getDataLayout();
      unsigned AS = PtrTy.getAddressSpace();
      unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
      if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
        report("gep offset operand must match index size for address space",
               MI);
      }
    }

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.isPointerOrPointerVector())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (DstTy.isPointerOrPointerVector() || SrcTy.isPointerOrPointerVector())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstTy.getScalarSizeInBits();
    unsigned SrcSize = SrcTy.getScalarSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
          SrcTy.isScalableVector() != DstTy.isScalableVector() ||
          SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
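    // For illustration (hypothetical MIR):
    //   %v:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)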
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
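    // For illustration (hypothetical MIR):
    //   %v:_(<2 x s8>) = G_BUILD_VECTOR_TRUNC %a(s32), %b(s32)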
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
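    // For illustration (hypothetical MIR):
    //   %v:_(<4 x s32>) = G_CONCAT_VECTORS %a(<2 x s32>), %b(<2 x s32>)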
1487 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1488 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1489 if (!DstTy.isVector() || !SrcTy.isVector())
1490 report(msg: "G_CONCAT_VECTOR requires vector source and destination operands",
1491 MI);
1492
1493 if (MI->getNumOperands() < 3)
1494 report(msg: "G_CONCAT_VECTOR requires at least 2 source operands", MI);
1495
1496 for (const MachineOperand &MO : llvm::drop_begin(RangeOrContainer: MI->operands(), N: 2))
1497 if (MRI->getType(Reg: MI->getOperand(i: 1).getReg()) != MRI->getType(Reg: MO.getReg()))
1498 report(msg: "G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1499 if (DstTy.getElementCount() !=
1500 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1501 report(msg: "G_CONCAT_VECTOR num dest and source elements should match", MI);
1502 break;
1503 }
1504 case TargetOpcode::G_ICMP:
1505 case TargetOpcode::G_FCMP: {
1506 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1507 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1508
1509 if ((DstTy.isVector() != SrcTy.isVector()) ||
1510 (DstTy.isVector() &&
1511 DstTy.getElementCount() != SrcTy.getElementCount()))
1512 report(msg: "Generic vector icmp/fcmp must preserve number of lanes", MI);
1513
1514 break;
1515 }
1516 case TargetOpcode::G_EXTRACT: {
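// G_EXTRACT reads DstSize contiguous bits starting at the given bit offset,
// e.g. taking the high half of a 64-bit value:
// %0:_(s32) = G_EXTRACT %1:_(s64), 32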
1517 const MachineOperand &SrcOp = MI->getOperand(i: 1);
1518 if (!SrcOp.isReg()) {
1519 report(msg: "extract source must be a register", MI);
1520 break;
1521 }
1522
1523 const MachineOperand &OffsetOp = MI->getOperand(i: 2);
1524 if (!OffsetOp.isImm()) {
1525 report(msg: "extract offset must be a constant", MI);
1526 break;
1527 }
1528
1529 unsigned DstSize = MRI->getType(Reg: MI->getOperand(i: 0).getReg()).getSizeInBits();
1530 unsigned SrcSize = MRI->getType(Reg: SrcOp.getReg()).getSizeInBits();
1531 if (SrcSize == DstSize)
1532 report(msg: "extract source must be larger than result", MI);
1533
1534 if (DstSize + OffsetOp.getImm() > SrcSize)
1535 report(msg: "extract reads past end of register", MI);
1536 break;
1537 }
1538 case TargetOpcode::G_INSERT: {
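// G_INSERT overwrites SrcSize bits of the first source with the second
// source at the given bit offset, e.g. replacing the low half:
// %0:_(s64) = G_INSERT %1:_(s64), %2:_(s32), 0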
1539 const MachineOperand &SrcOp = MI->getOperand(i: 2);
1540 if (!SrcOp.isReg()) {
1541 report(msg: "insert source must be a register", MI);
1542 break;
1543 }
1544
1545 const MachineOperand &OffsetOp = MI->getOperand(i: 3);
1546 if (!OffsetOp.isImm()) {
1547 report(msg: "insert offset must be a constant", MI);
1548 break;
1549 }
1550
1551 unsigned DstSize = MRI->getType(Reg: MI->getOperand(i: 0).getReg()).getSizeInBits();
1552 unsigned SrcSize = MRI->getType(Reg: SrcOp.getReg()).getSizeInBits();
1553
1554 if (DstSize <= SrcSize)
1555 report(msg: "inserted size must be smaller than total register", MI);
1556
1557 if (SrcSize + OffsetOp.getImm() > DstSize)
1558 report(msg: "insert writes past end of register", MI);
1559
1560 break;
1561 }
1562 case TargetOpcode::G_JUMP_TABLE: {
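// e.g. %0:_(p0) = G_JUMP_TABLE %jump-table.0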
1563 if (!MI->getOperand(i: 1).isJTI())
1564 report(msg: "G_JUMP_TABLE source operand must be a jump table index", MI);
1565 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1566 if (!DstTy.isPointer())
1567 report(msg: "G_JUMP_TABLE dest operand must have a pointer type", MI);
1568 break;
1569 }
1570 case TargetOpcode::G_BRJT: {
1571 if (!MRI->getType(Reg: MI->getOperand(i: 0).getReg()).isPointer())
1572 report(msg: "G_BRJT src operand 0 must be a pointer type", MI);
1573
1574 if (!MI->getOperand(i: 1).isJTI())
1575 report(msg: "G_BRJT src operand 1 must be a jump table index", MI);
1576
1577 const auto &IdxOp = MI->getOperand(i: 2);
1578 if (!IdxOp.isReg() || MRI->getType(Reg: IdxOp.getReg()).isPointer())
1579 report(msg: "G_BRJT src operand 2 must be a scalar reg type", MI);
1580 break;
1581 }
1582 case TargetOpcode::G_INTRINSIC:
1583 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1584 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1585 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1586 // TODO: Should verify number of def and use operands, but the current
1587 // interface requires passing in IR types for mangling.
1588 const MachineOperand &IntrIDOp = MI->getOperand(i: MI->getNumExplicitDefs());
1589 if (!IntrIDOp.isIntrinsicID()) {
1590 report(msg: "G_INTRINSIC first src operand must be an intrinsic ID", MI);
1591 break;
1592 }
1593
1594 if (!verifyGIntrinsicSideEffects(MI))
1595 break;
1596 if (!verifyGIntrinsicConvergence(MI))
1597 break;
1598
1599 break;
1600 }
1601 case TargetOpcode::G_SEXT_INREG: {
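// G_SEXT_INREG sign-extends the value held in the low Imm bits of the
// source, so Imm must lie in [1, source scalar width), e.g. sign-extending
// an 8-bit value held in a 32-bit register:
// %0:_(s32) = G_SEXT_INREG %1:_(s32), 8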
1602 if (!MI->getOperand(i: 2).isImm()) {
1603 report(msg: "G_SEXT_INREG expects an immediate operand #2", MI);
1604 break;
1605 }
1606
1607 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1608 int64_t Imm = MI->getOperand(i: 2).getImm();
1609 if (Imm <= 0)
1610 report(msg: "G_SEXT_INREG size must be >= 1", MI);
1611 if (Imm >= SrcTy.getScalarSizeInBits())
1612 report(msg: "G_SEXT_INREG size must be less than source bit width", MI);
1613 break;
1614 }
1615 case TargetOpcode::G_BSWAP: {
1616 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1617 if (DstTy.getScalarSizeInBits() % 16 != 0)
1618 report(msg: "G_BSWAP size must be a multiple of 16 bits", MI);
1619 break;
1620 }
1621 case TargetOpcode::G_VSCALE: {
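// e.g. %0:_(s64) = G_VSCALE i64 1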
1622 if (!MI->getOperand(i: 1).isCImm()) {
1623 report(msg: "G_VSCALE operand must be cimm", MI);
1624 break;
1625 }
1626 if (MI->getOperand(i: 1).getCImm()->isZero()) {
1627 report(msg: "G_VSCALE immediate cannot be zero", MI);
1628 break;
1629 }
1630 break;
1631 }
1632 case TargetOpcode::G_INSERT_SUBVECTOR: {
1633 const MachineOperand &Src0Op = MI->getOperand(i: 1);
1634 if (!Src0Op.isReg()) {
1635 report(msg: "G_INSERT_SUBVECTOR first source must be a register", MI);
1636 break;
1637 }
1638
1639 const MachineOperand &Src1Op = MI->getOperand(i: 2);
1640 if (!Src1Op.isReg()) {
1641 report(msg: "G_INSERT_SUBVECTOR second source must be a register", MI);
1642 break;
1643 }
1644
1645 const MachineOperand &IndexOp = MI->getOperand(i: 3);
1646 if (!IndexOp.isImm()) {
1647 report(msg: "G_INSERT_SUBVECTOR index must be an immediate", MI);
1648 break;
1649 }
1650
1651 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1652 LLT Src0Ty = MRI->getType(Reg: Src0Op.getReg());
1653 LLT Src1Ty = MRI->getType(Reg: Src1Op.getReg());
1654
1655 if (!DstTy.isVector()) {
1656 report(msg: "Destination type must be a vector", MI);
1657 break;
1658 }
1659
1660 if (!Src0Ty.isVector()) {
1661 report(msg: "First source must be a vector", MI);
1662 break;
1663 }
1664
1665 if (!Src1Ty.isVector()) {
1666 report(msg: "Second source must be a vector", MI);
1667 break;
1668 }
1669
1670 if (DstTy != Src0Ty) {
1671 report(msg: "Destination type must match the first source vector type", MI);
1672 break;
1673 }
1674
1675 if (Src0Ty.getElementType() != Src1Ty.getElementType()) {
1676 report(msg: "Element type of source vectors must be the same", MI);
1677 break;
1678 }
1679
1680 if (IndexOp.getImm() != 0 &&
1681 IndexOp.getImm() % Src1Ty.getElementCount().getKnownMinValue() != 0) {
1682 report(msg: "Index must be a multiple of the second source vector's "
1683 "minimum vector length",
1684 MI);
1685 break;
1686 }
1687 break;
1688 }
1689 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1690 const MachineOperand &SrcOp = MI->getOperand(i: 1);
1691 if (!SrcOp.isReg()) {
1692 report(msg: "G_EXTRACT_SUBVECTOR first source must be a register", MI);
1693 break;
1694 }
1695
1696 const MachineOperand &IndexOp = MI->getOperand(i: 2);
1697 if (!IndexOp.isImm()) {
1698 report(msg: "G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1699 break;
1700 }
1701
1702 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1703 LLT SrcTy = MRI->getType(Reg: SrcOp.getReg());
1704
1705 if (!DstTy.isVector()) {
1706 report(msg: "Destination type must be a vector", MI);
1707 break;
1708 }
1709
1710 if (!SrcTy.isVector()) {
1711 report(msg: "First source must be a vector", MI);
1712 break;
1713 }
1714
1715 if (DstTy.getElementType() != SrcTy.getElementType()) {
1716 report(msg: "Element type of vectors must be the same", MI);
1717 break;
1718 }
1719
1720 if (IndexOp.getImm() != 0 &&
1721 IndexOp.getImm() % SrcTy.getElementCount().getKnownMinValue() != 0) {
1722 report(msg: "Index must be a multiple of the source vector's minimum vector "
1723 "length",
1724 MI);
1725 break;
1726 }
1727
1728 break;
1729 }
1730 case TargetOpcode::G_SHUFFLE_VECTOR: {
1731 const MachineOperand &MaskOp = MI->getOperand(i: 3);
1732 if (!MaskOp.isShuffleMask()) {
1733 report(msg: "Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1734 break;
1735 }
1736
1737 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1738 LLT Src0Ty = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1739 LLT Src1Ty = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1740
1741 if (Src0Ty != Src1Ty)
1742 report(msg: "Source operands must be the same type", MI);
1743
1744 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1745 report(msg: "G_SHUFFLE_VECTOR cannot change element type", MI);
1746
1747 // Don't check that all operands are vector because scalars are used in
1748 // place of 1 element vectors.
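// Mask elements index into the concatenation of the two sources, so every
// non-negative index must be below 2 * SrcNumElts, e.g.
// %0:_(<4 x s32>) = G_SHUFFLE_VECTOR %1:_(<2 x s32>), %2:_(<2 x s32>),
//                                    shufflemask(0, 1, 2, 3)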
1749 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1750 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1751
1752 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1753
1754 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1755 report(msg: "Wrong result type for shufflemask", MI);
1756
1757 for (int Idx : MaskIdxes) {
1758 if (Idx < 0)
1759 continue;
1760
1761 if (Idx >= 2 * SrcNumElts)
1762 report(msg: "Out of bounds shuffle index", MI);
1763 }
1764
1765 break;
1766 }
1767
1768 case TargetOpcode::G_SPLAT_VECTOR: {
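// e.g. %0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1:_(s32)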
1769 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1770 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1771
1772 if (!DstTy.isScalableVector()) {
1773 report(msg: "Destination type must be a scalable vector", MI);
1774 break;
1775 }
1776
1777 if (!SrcTy.isScalar()) {
1778 report(msg: "Source type must be a scalar", MI);
1779 break;
1780 }
1781
1782 if (TypeSize::isKnownGT(LHS: DstTy.getElementType().getSizeInBits(),
1783 RHS: SrcTy.getSizeInBits())) {
1784 report(msg: "Element type of the destination must be the same size or smaller "
1785 "than the source type",
1786 MI);
1787 break;
1788 }
1789
1790 break;
1791 }
1792 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
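// e.g. (with a 64-bit vector index type, which is target-dependent):
// %0:_(s32) = G_EXTRACT_VECTOR_ELT %1:_(<4 x s32>), %2:_(s64)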
1793 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1794 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1795 LLT IdxTy = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1796
1797 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1798 report(msg: "Destination type must be a scalar or pointer", MI);
1799 break;
1800 }
1801
1802 if (!SrcTy.isVector()) {
1803 report(msg: "First source must be a vector", MI);
1804 break;
1805 }
1806
1807 auto TLI = MF->getSubtarget().getTargetLowering();
1808 if (IdxTy.getSizeInBits() !=
1809 TLI->getVectorIdxTy(DL: MF->getDataLayout()).getFixedSizeInBits()) {
1810 report(msg: "Index type must match VectorIdxTy", MI);
1811 break;
1812 }
1813
1814 break;
1815 }
1816 case TargetOpcode::G_INSERT_VECTOR_ELT: {
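// e.g. (with a 64-bit vector index type, which is target-dependent):
// %0:_(<4 x s32>) = G_INSERT_VECTOR_ELT %1:_(<4 x s32>), %2:_(s32), %3:_(s64)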
1817 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1818 LLT VecTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1819 LLT ScaTy = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1820 LLT IdxTy = MRI->getType(Reg: MI->getOperand(i: 3).getReg());
1821
1822 if (!DstTy.isVector()) {
1823 report(msg: "Destination type must be a vector", MI);
1824 break;
1825 }
1826
1827 if (VecTy != DstTy) {
1828 report(msg: "Destination type and vector type must match", MI);
1829 break;
1830 }
1831
1832 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
1833 report(msg: "Inserted element must be a scalar or pointer", MI);
1834 break;
1835 }
1836
1837 auto TLI = MF->getSubtarget().getTargetLowering();
1838 if (IdxTy.getSizeInBits() !=
1839 TLI->getVectorIdxTy(DL: MF->getDataLayout()).getFixedSizeInBits()) {
1840 report(msg: "Index type must match VectorIdxTy", MI);
1841 break;
1842 }
1843
1844 break;
1845 }
1846 case TargetOpcode::G_DYN_STACKALLOC: {
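// e.g. allocating a dynamically-sized object with 16-byte alignment:
// %0:_(p0) = G_DYN_STACKALLOC %1:_(s64), 16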
1847 const MachineOperand &DstOp = MI->getOperand(i: 0);
1848 const MachineOperand &AllocOp = MI->getOperand(i: 1);
1849 const MachineOperand &AlignOp = MI->getOperand(i: 2);
1850
1851 if (!DstOp.isReg() || !MRI->getType(Reg: DstOp.getReg()).isPointer()) {
1852 report(msg: "dst operand 0 must be a pointer type", MI);
1853 break;
1854 }
1855
1856 if (!AllocOp.isReg() || !MRI->getType(Reg: AllocOp.getReg()).isScalar()) {
1857 report(msg: "src operand 1 must be a scalar reg type", MI);
1858 break;
1859 }
1860
1861 if (!AlignOp.isImm()) {
1862 report(msg: "src operand 2 must be an immediate type", MI);
1863 break;
1864 }
1865 break;
1866 }
1867 case TargetOpcode::G_MEMCPY_INLINE:
1868 case TargetOpcode::G_MEMCPY:
1869 case TargetOpcode::G_MEMMOVE: {
1870 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1871 if (MMOs.size() != 2) {
1872 report(msg: "memcpy/memmove must have 2 memory operands", MI);
1873 break;
1874 }
1875
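// By convention the first memory operand describes the store to the
// destination and the second the load from the source, which is what the
// checks below enforce.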
1876 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1877 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1878 report(msg: "wrong memory operand types", MI);
1879 break;
1880 }
1881
1882 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1883 report(msg: "inconsistent memory operand sizes", MI);
1884
1885 LLT DstPtrTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1886 LLT SrcPtrTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1887
1888 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1889 report(msg: "memory instruction operand must be a pointer", MI);
1890 break;
1891 }
1892
1893 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1894 report(msg: "inconsistent store address space", MI);
1895 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1896 report(msg: "inconsistent load address space", MI);
1897
1898 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1899 if (!MI->getOperand(i: 3).isImm() || (MI->getOperand(i: 3).getImm() & ~1LL))
1900 report(msg: "'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1901
1902 break;
1903 }
1904 case TargetOpcode::G_BZERO:
1905 case TargetOpcode::G_MEMSET: {
1906 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1907 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1908 if (MMOs.size() != 1) {
1909 report(Msg: Twine(Name, " must have 1 memory operand"), MI);
1910 break;
1911 }
1912
1913 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1914 report(Msg: Twine(Name, " memory operand must be a store"), MI);
1915 break;
1916 }
1917
1918 LLT DstPtrTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1919 if (!DstPtrTy.isPointer()) {
1920 report(Msg: Twine(Name, " operand must be a pointer"), MI);
1921 break;
1922 }
1923
1924 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1925 report(Msg: "inconsistent " + Twine(Name, " address space"), MI);
1926
1927 if (!MI->getOperand(i: MI->getNumOperands() - 1).isImm() ||
1928 (MI->getOperand(i: MI->getNumOperands() - 1).getImm() & ~1LL))
1929 report(msg: "'tail' flag (last operand) must be an immediate 0 or 1", MI);
1930
1931 break;
1932 }
1933 case TargetOpcode::G_UBSANTRAP: {
1934 const MachineOperand &KindOp = MI->getOperand(i: 0);
1935 if (!MI->getOperand(i: 0).isImm()) {
1936 report(msg: "Crash kind must be an immediate", MO: &KindOp, MONum: 0);
1937 break;
1938 }
1939 int64_t Kind = MI->getOperand(i: 0).getImm();
1940 if (!isInt<8>(x: Kind))
1941 report(msg: "Crash kind must be 8 bit wide", MO: &KindOp, MONum: 0);
1942 break;
1943 }
1944 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1945 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1946 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1947 LLT Src1Ty = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1948 LLT Src2Ty = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1949 if (!DstTy.isScalar())
1950 report(msg: "Vector reduction requires a scalar destination type", MI);
1951 if (!Src1Ty.isScalar())
1952 report(msg: "Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1953 if (!Src2Ty.isVector())
1954 report(msg: "Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1955 break;
1956 }
1957 case TargetOpcode::G_VECREDUCE_FADD:
1958 case TargetOpcode::G_VECREDUCE_FMUL:
1959 case TargetOpcode::G_VECREDUCE_FMAX:
1960 case TargetOpcode::G_VECREDUCE_FMIN:
1961 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1962 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1963 case TargetOpcode::G_VECREDUCE_ADD:
1964 case TargetOpcode::G_VECREDUCE_MUL:
1965 case TargetOpcode::G_VECREDUCE_AND:
1966 case TargetOpcode::G_VECREDUCE_OR:
1967 case TargetOpcode::G_VECREDUCE_XOR:
1968 case TargetOpcode::G_VECREDUCE_SMAX:
1969 case TargetOpcode::G_VECREDUCE_SMIN:
1970 case TargetOpcode::G_VECREDUCE_UMAX:
1971 case TargetOpcode::G_VECREDUCE_UMIN: {
1972 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1973 if (!DstTy.isScalar())
1974 report(msg: "Vector reduction requires a scalar destination type", MI);
1975 break;
1976 }
1977
1978 case TargetOpcode::G_SBFX:
1979 case TargetOpcode::G_UBFX: {
1980 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1981 if (DstTy.isVector()) {
1982 report(msg: "Bitfield extraction is not supported on vectors", MI);
1983 break;
1984 }
1985 break;
1986 }
1987 case TargetOpcode::G_SHL:
1988 case TargetOpcode::G_LSHR:
1989 case TargetOpcode::G_ASHR:
1990 case TargetOpcode::G_ROTR:
1991 case TargetOpcode::G_ROTL: {
1992 LLT Src1Ty = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1993 LLT Src2Ty = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1994 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1995 report(msg: "Shifts and rotates require operands to be either all scalars or "
1996 "all vectors",
1997 MI);
1998 break;
1999 }
2000 break;
2001 }
2002 case TargetOpcode::G_LLROUND:
2003 case TargetOpcode::G_LROUND: {
2004 verifyAllRegOpsScalar(MI: *MI, MRI: *MRI);
2005 break;
2006 }
2007 case TargetOpcode::G_IS_FPCLASS: {
2008 LLT DestTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
2009 LLT DestEltTy = DestTy.getScalarType();
2010 if (!DestEltTy.isScalar()) {
2011 report(msg: "Destination must be a scalar or vector of scalars", MI);
2012 break;
2013 }
2014 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
2015 LLT SrcEltTy = SrcTy.getScalarType();
2016 if (!SrcEltTy.isScalar()) {
2017 report(msg: "Source must be a scalar or vector of scalars", MI);
2018 break;
2019 }
2020 if (!verifyVectorElementMatch(Ty0: DestTy, Ty1: SrcTy, MI))
2021 break;
2022 const MachineOperand &TestMO = MI->getOperand(i: 2);
2023 if (!TestMO.isImm()) {
2024 report(msg: "floating-point class set (operand 2) must be an immediate", MI);
2025 break;
2026 }
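// The class set is a bitmask of FPClassTest values (e.g. fcNan | fcInf),
// so any value outside [0, fcAllFlags] cannot be a valid test.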
2027 int64_t Test = TestMO.getImm();
2028 if (Test < 0 || Test > fcAllFlags) {
2029 report(msg: "Incorrect floating-point class set (operand 2)", MI);
2030 break;
2031 }
2032 break;
2033 }
2034 case TargetOpcode::G_PREFETCH: {
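// The operands mirror llvm.prefetch: address, rw (0 = read, 1 = write),
// locality (0 - 3), and cache type (0 = instruction, 1 = data).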
2035 const MachineOperand &AddrOp = MI->getOperand(i: 0);
2036 if (!AddrOp.isReg() || !MRI->getType(Reg: AddrOp.getReg()).isPointer()) {
2037 report(msg: "addr operand must be a pointer", MO: &AddrOp, MONum: 0);
2038 break;
2039 }
2040 const MachineOperand &RWOp = MI->getOperand(i: 1);
2041 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2042 report(msg: "rw operand must be an immediate 0-1", MO: &RWOp, MONum: 1);
2043 break;
2044 }
2045 const MachineOperand &LocalityOp = MI->getOperand(i: 2);
2046 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2047 report(msg: "locality operand must be an immediate 0-3", MO: &LocalityOp, MONum: 2);
2048 break;
2049 }
2050 const MachineOperand &CacheTypeOp = MI->getOperand(i: 3);
2051 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2052 report(msg: "cache type operand must be an immediate 0-1", MO: &CacheTypeOp, MONum: 3);
2053 break;
2054 }
2055 break;
2056 }
2057 case TargetOpcode::G_ASSERT_ALIGN: {
2058 if (MI->getOperand(i: 2).getImm() < 1)
2059 report(msg: "alignment immediate must be >= 1", MI);
2060 break;
2061 }
2062 case TargetOpcode::G_CONSTANT_POOL: {
2063 if (!MI->getOperand(i: 1).isCPI())
2064 report(msg: "Src operand 1 must be a constant pool index", MI);
2065 if (!MRI->getType(Reg: MI->getOperand(i: 0).getReg()).isPointer())
2066 report(msg: "Dst operand 0 must be a pointer", MI);
2067 break;
2068 }
2069 default:
2070 break;
2071 }
2072}
2073
2074void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2075 const MCInstrDesc &MCID = MI->getDesc();
2076 if (MI->getNumOperands() < MCID.getNumOperands()) {
2077 report(msg: "Too few operands", MI);
2078 errs() << MCID.getNumOperands() << " operands expected, but "
2079 << MI->getNumOperands() << " given.\n";
2080 }
2081
2082 if (MI->getFlag(Flag: MachineInstr::NoConvergent) && !MCID.isConvergent())
2083 report(msg: "NoConvergent flag expected only on convergent instructions.", MI);
2084
2085 if (MI->isPHI()) {
2086 if (MF->getProperties().hasProperty(
2087 P: MachineFunctionProperties::Property::NoPHIs))
2088 report(msg: "Found PHI instruction with NoPHIs property set", MI);
2089
2090 if (FirstNonPHI)
2091 report(msg: "Found PHI instruction after non-PHI", MI);
2092 } else if (FirstNonPHI == nullptr)
2093 FirstNonPHI = MI;
2094
2095 // Check the tied operands.
2096 if (MI->isInlineAsm())
2097 verifyInlineAsm(MI);
2098
2099 // Check that unspillable terminators define a reg and have at most one use.
2100 if (TII->isUnspillableTerminator(MI)) {
2101 if (!MI->getOperand(i: 0).isReg() || !MI->getOperand(i: 0).isDef())
2102 report(msg: "Unspillable Terminator does not define a reg", MI);
2103 Register Def = MI->getOperand(i: 0).getReg();
2104 if (Def.isVirtual() &&
2105 !MF->getProperties().hasProperty(
2106 P: MachineFunctionProperties::Property::NoPHIs) &&
2107 std::distance(first: MRI->use_nodbg_begin(RegNo: Def), last: MRI->use_nodbg_end()) > 1)
2108 report(msg: "Unspillable Terminator expected to have at most one use!", MI);
2109 }
2110
2111 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2112 // DBG_VALUEs: these are convenient to use in tests, but should never get
2113 // generated.
2114 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2115 if (!MI->getDebugLoc())
2116 report(msg: "Missing DebugLoc for debug instruction", MI);
2117
2118 // Meta instructions should never be the subject of debug value tracking,
2119 // they don't create a value in the output program at all.
2120 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2121 report(msg: "Metadata instruction should not have a value tracking number", MI);
2122
2123 // Check the MachineMemOperands for basic consistency.
2124 for (MachineMemOperand *Op : MI->memoperands()) {
2125 if (Op->isLoad() && !MI->mayLoad())
2126 report(msg: "Missing mayLoad flag", MI);
2127 if (Op->isStore() && !MI->mayStore())
2128 report(msg: "Missing mayStore flag", MI);
2129 }
2130
2131 // Debug values must not have a slot index.
2132 // Other instructions must have one, unless they are inside a bundle.
2133 if (LiveInts) {
2134 bool mapped = !LiveInts->isNotInMIMap(Instr: *MI);
2135 if (MI->isDebugOrPseudoInstr()) {
2136 if (mapped)
2137 report(msg: "Debug instruction has a slot index", MI);
2138 } else if (MI->isInsideBundle()) {
2139 if (mapped)
2140 report(msg: "Instruction inside bundle has a slot index", MI);
2141 } else {
2142 if (!mapped)
2143 report(msg: "Missing slot index", MI);
2144 }
2145 }
2146
2147 unsigned Opc = MCID.getOpcode();
2148 if (isPreISelGenericOpcode(Opcode: Opc) || isPreISelGenericOptimizationHint(Opcode: Opc)) {
2149 verifyPreISelGenericInstruction(MI);
2150 return;
2151 }
2152
2153 StringRef ErrorInfo;
2154 if (!TII->verifyInstruction(MI: *MI, ErrInfo&: ErrorInfo))
2155 report(msg: ErrorInfo.data(), MI);
2156
2157 // Verify properties of various specific instruction types
2158 switch (MI->getOpcode()) {
2159 case TargetOpcode::COPY: {
2160 const MachineOperand &DstOp = MI->getOperand(i: 0);
2161 const MachineOperand &SrcOp = MI->getOperand(i: 1);
2162 const Register SrcReg = SrcOp.getReg();
2163 const Register DstReg = DstOp.getReg();
2164
2165 LLT DstTy = MRI->getType(Reg: DstReg);
2166 LLT SrcTy = MRI->getType(Reg: SrcReg);
2167 if (SrcTy.isValid() && DstTy.isValid()) {
2168 // If both types are valid, check that the types are the same.
2169 if (SrcTy != DstTy) {
2170 report(msg: "Copy Instruction is illegal with mismatching types", MI);
2171 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
2172 }
2173
2174 break;
2175 }
2176
2177 if (!SrcTy.isValid() && !DstTy.isValid())
2178 break;
2179
2180 // If we have only one valid type, this is likely a copy between a virtual
2181 // and physical register.
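// e.g. (on AArch64) $x0 = COPY %0(s64)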
2182 TypeSize SrcSize = TRI->getRegSizeInBits(Reg: SrcReg, MRI: *MRI);
2183 TypeSize DstSize = TRI->getRegSizeInBits(Reg: DstReg, MRI: *MRI);
2184 if (SrcReg.isPhysical() && DstTy.isValid()) {
2185 const TargetRegisterClass *SrcRC =
2186 TRI->getMinimalPhysRegClassLLT(Reg: SrcReg, Ty: DstTy);
2187 if (SrcRC)
2188 SrcSize = TRI->getRegSizeInBits(RC: *SrcRC);
2189 }
2190
2191 if (DstReg.isPhysical() && SrcTy.isValid()) {
2192 const TargetRegisterClass *DstRC =
2193 TRI->getMinimalPhysRegClassLLT(Reg: DstReg, Ty: SrcTy);
2194 if (DstRC)
2195 DstSize = TRI->getRegSizeInBits(RC: *DstRC);
2196 }
2197
2198 // The next two checks allow COPY between physical and virtual registers,
2199 // when the virtual register has a scalable size and the physical register
2200 // has a fixed size. These checks allow COPY between *potentially* mismatched
2201 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2202 // be able to resolve a fixed size for the scalable vector, and at that
2203 // point this function will know for sure whether the sizes are mismatched
2204 // and correctly report a size mismatch.
2205 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2206 !SrcSize.isScalable())
2207 break;
2208 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2209 !DstSize.isScalable())
2210 break;
2211
2212 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2213 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2214 report(msg: "Copy Instruction is illegal with mismatching sizes", MI);
2215 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
2216 << "\n";
2217 }
2218 }
2219 break;
2220 }
2221 case TargetOpcode::STATEPOINT: {
2222 StatepointOpers SO(MI);
2223 if (!MI->getOperand(i: SO.getIDPos()).isImm() ||
2224 !MI->getOperand(i: SO.getNBytesPos()).isImm() ||
2225 !MI->getOperand(i: SO.getNCallArgsPos()).isImm()) {
2226 report(msg: "meta operands to STATEPOINT not constant!", MI);
2227 break;
2228 }
2229
2230 auto VerifyStackMapConstant = [&](unsigned Offset) {
2231 if (Offset >= MI->getNumOperands()) {
2232 report(msg: "stack map constant to STATEPOINT is out of range!", MI);
2233 return;
2234 }
2235 if (!MI->getOperand(i: Offset - 1).isImm() ||
2236 MI->getOperand(i: Offset - 1).getImm() != StackMaps::ConstantOp ||
2237 !MI->getOperand(i: Offset).isImm())
2238 report(msg: "stack map constant to STATEPOINT not well formed!", MI);
2239 };
2240 VerifyStackMapConstant(SO.getCCIdx());
2241 VerifyStackMapConstant(SO.getFlagsIdx());
2242 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2243 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2244 VerifyStackMapConstant(SO.getNumAllocaIdx());
2245 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2246
2247 // Verify that all explicit statepoint defs are tied to gc operands as
2248 // they are expected to be a relocation of gc operands.
2249 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2250 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2251 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2252 unsigned UseOpIdx;
2253 if (!MI->isRegTiedToUseOperand(DefOpIdx: Idx, UseOpIdx: &UseOpIdx)) {
2254 report(msg: "STATEPOINT defs expected to be tied", MI);
2255 break;
2256 }
2257 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2258 report(msg: "STATEPOINT def tied to non-gc operand", MI);
2259 break;
2260 }
2261 }
2262
2263 // TODO: verify we have properly encoded deopt arguments
2264 } break;
2265 case TargetOpcode::INSERT_SUBREG: {
2266 unsigned InsertedSize;
2267 if (unsigned SubIdx = MI->getOperand(i: 2).getSubReg())
2268 InsertedSize = TRI->getSubRegIdxSize(Idx: SubIdx);
2269 else
2270 InsertedSize = TRI->getRegSizeInBits(Reg: MI->getOperand(i: 2).getReg(), MRI: *MRI);
2271 unsigned SubRegSize = TRI->getSubRegIdxSize(Idx: MI->getOperand(i: 3).getImm());
2272 if (SubRegSize < InsertedSize) {
2273 report(msg: "INSERT_SUBREG expected inserted value to have equal or lesser "
2274 "size than the subreg it was inserted into", MI);
2275 break;
2276 }
2277 } break;
2278 case TargetOpcode::REG_SEQUENCE: {
2279 unsigned NumOps = MI->getNumOperands();
2280 if (!(NumOps & 1)) {
2281 report(msg: "Invalid number of operands for REG_SEQUENCE", MI);
2282 break;
2283 }
2284
2285 for (unsigned I = 1; I != NumOps; I += 2) {
2286 const MachineOperand &RegOp = MI->getOperand(i: I);
2287 const MachineOperand &SubRegOp = MI->getOperand(i: I + 1);
2288
2289 if (!RegOp.isReg())
2290 report(msg: "Invalid register operand for REG_SEQUENCE", MO: &RegOp, MONum: I);
2291
2292 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2293 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2294 report(msg: "Invalid subregister index operand for REG_SEQUENCE",
2295 MO: &SubRegOp, MONum: I + 1);
2296 }
2297 }
2298
2299 Register DstReg = MI->getOperand(i: 0).getReg();
2300 if (DstReg.isPhysical())
2301 report(msg: "REG_SEQUENCE does not support physical register results", MI);
2302
2303 if (MI->getOperand(i: 0).getSubReg())
2304 report(msg: "Invalid subreg result for REG_SEQUENCE", MI);
2305
2306 break;
2307 }
2308 }
2309}
2310
2311void
2312MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2313 const MachineInstr *MI = MO->getParent();
2314 const MCInstrDesc &MCID = MI->getDesc();
2315 unsigned NumDefs = MCID.getNumDefs();
2316 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2317 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2318
2319 // The first MCID.NumDefs operands must be explicit register defines
2320 if (MONum < NumDefs) {
2321 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2322 if (!MO->isReg())
2323 report(msg: "Explicit definition must be a register", MO, MONum);
2324 else if (!MO->isDef() && !MCOI.isOptionalDef())
2325 report(msg: "Explicit definition marked as use", MO, MONum);
2326 else if (MO->isImplicit())
2327 report(msg: "Explicit definition marked as implicit", MO, MONum);
2328 } else if (MONum < MCID.getNumOperands()) {
2329 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2330 // Don't check if it's the last operand in a variadic instruction. See,
2331 // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
2332 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2333 if (!IsOptional) {
2334 if (MO->isReg()) {
2335 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2336 report(msg: "Explicit operand marked as def", MO, MONum);
2337 if (MO->isImplicit())
2338 report(msg: "Explicit operand marked as implicit", MO, MONum);
2339 }
2340
2341 // Check that an instruction has register operands only as expected.
2342 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2343 !MO->isReg() && !MO->isFI())
2344 report(msg: "Expected a register operand.", MO, MONum);
2345 if (MO->isReg()) {
2346 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2347 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2348 !TII->isPCRelRegisterOperandLegal(MO: *MO)))
2349 report(msg: "Expected a non-register operand.", MO, MONum);
2350 }
2351 }
2352
2353 int TiedTo = MCID.getOperandConstraint(OpNum: MONum, Constraint: MCOI::TIED_TO);
2354 if (TiedTo != -1) {
2355 if (!MO->isReg())
2356 report(msg: "Tied use must be a register", MO, MONum);
2357 else if (!MO->isTied())
2358 report(msg: "Operand should be tied", MO, MONum);
2359 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(OpIdx: MONum))
2360 report(msg: "Tied def doesn't match MCInstrDesc", MO, MONum);
2361 else if (MO->getReg().isPhysical()) {
2362 const MachineOperand &MOTied = MI->getOperand(i: TiedTo);
2363 if (!MOTied.isReg())
2364 report(msg: "Tied counterpart must be a register", MO: &MOTied, MONum: TiedTo);
2365 else if (MOTied.getReg().isPhysical() &&
2366 MO->getReg() != MOTied.getReg())
2367 report(msg: "Tied physical registers must match.", MO: &MOTied, MONum: TiedTo);
2368 }
2369 } else if (MO->isReg() && MO->isTied())
2370 report(msg: "Explicit operand should not be tied", MO, MONum);
2371 } else if (!MI->isVariadic()) {
2372 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2373 if (!MO->isValidExcessOperand())
2374 report(msg: "Extra explicit operand on non-variadic instruction", MO, MONum);
2375 }
2376
2377 switch (MO->getType()) {
2378 case MachineOperand::MO_Register: {
2379 // Verify debug flag on debug instructions. Check this first because reg0
2380 // indicates an undefined debug value.
2381 if (MI->isDebugInstr() && MO->isUse()) {
2382 if (!MO->isDebug())
2383 report(msg: "Register operand must be marked debug", MO, MONum);
2384 } else if (MO->isDebug()) {
2385 report(msg: "Register operand must not be marked debug", MO, MONum);
2386 }
2387
2388 const Register Reg = MO->getReg();
2389 if (!Reg)
2390 return;
2391 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2392 checkLiveness(MO, MONum);
2393
2394 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2395 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2396 report(msg: "Undef virtual register def operands require a subregister", MO, MONum);
2397
2398 // Verify the consistency of tied operands.
2399 if (MO->isTied()) {
2400 unsigned OtherIdx = MI->findTiedOperandIdx(OpIdx: MONum);
2401 const MachineOperand &OtherMO = MI->getOperand(i: OtherIdx);
2402 if (!OtherMO.isReg())
2403 report(msg: "Must be tied to a register", MO, MONum);
2404 if (!OtherMO.isTied())
2405 report(msg: "Missing tie flags on tied operand", MO, MONum);
2406 if (MI->findTiedOperandIdx(OpIdx: OtherIdx) != MONum)
2407 report(msg: "Inconsistent tie links", MO, MONum);
2408 if (MONum < MCID.getNumDefs()) {
2409 if (OtherIdx < MCID.getNumOperands()) {
2410 if (-1 == MCID.getOperandConstraint(OpNum: OtherIdx, Constraint: MCOI::TIED_TO))
2411 report(msg: "Explicit def tied to explicit use without tie constraint",
2412 MO, MONum);
2413 } else {
2414 if (!OtherMO.isImplicit())
2415 report(msg: "Explicit def should be tied to implicit use", MO, MONum);
2416 }
2417 }
2418 }
2419
2420 // Verify two-address constraints after the TwoAddressInstruction pass.
2421 // Both the TwoAddressInstruction and PHIElimination passes call
2422 // MRI->leaveSSA() to clear IsSSA, but the verification must happen after
2423 // TwoAddressInstruction, not after PHIElimination. So rather than keying
2424 // on IsSSA, verify the two-address constraints based on the
2425 // TiedOpsRewritten property, which is set by the TwoAddressInstruction
2426 // pass itself.
2427 unsigned DefIdx;
2428 if (MF->getProperties().hasProperty(
2429 P: MachineFunctionProperties::Property::TiedOpsRewritten) &&
2430 MO->isUse() && MI->isRegTiedToDefOperand(UseOpIdx: MONum, DefOpIdx: &DefIdx) &&
2431 Reg != MI->getOperand(i: DefIdx).getReg())
2432 report(msg: "Two-address instruction operands must be identical", MO, MONum);
2433
2434 // Check register classes.
2435 unsigned SubIdx = MO->getSubReg();
2436
2437 if (Reg.isPhysical()) {
2438 if (SubIdx) {
2439 report(msg: "Illegal subregister index for physical register", MO, MONum);
2440 return;
2441 }
2442 if (MONum < MCID.getNumOperands()) {
2443 if (const TargetRegisterClass *DRC =
2444 TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF)) {
2445 if (!DRC->contains(Reg)) {
2446 report(msg: "Illegal physical register for instruction", MO, MONum);
2447 errs() << printReg(Reg, TRI) << " is not a "
2448 << TRI->getRegClassName(Class: DRC) << " register.\n";
2449 }
2450 }
2451 }
2452 if (MO->isRenamable()) {
2453 if (MRI->isReserved(PhysReg: Reg)) {
2454 report(msg: "isRenamable set on reserved register", MO, MONum);
2455 return;
2456 }
2457 }
2458 } else {
2459 // Virtual register.
2460 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2461 if (!RC) {
2462 // This is a generic virtual register.
2463
2464 // Do not allow undef uses for generic virtual registers. This ensures
2465 // getVRegDef can never fail and return null on a generic register.
2466 //
2467 // FIXME: This restriction should probably be broadened to all SSA
2468 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2469 // run on the SSA function just before phi elimination.
2470 if (MO->isUndef())
2471 report(msg: "Generic virtual register use cannot be undef", MO, MONum);
2472
2473 // Debug value instruction is permitted to use undefined vregs.
2474 // This is a performance measure to skip the overhead of immediately
2475 // pruning unused debug operands. The final undef substitution occurs
2476 // when debug values are allocated in LDVImpl::handleDebugValue, so
2477 // these verifications always apply after this pass.
2478 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2479 !MI->isDebugValue() || !MRI->def_empty(RegNo: Reg)) {
2480 // If we're post-Select, we can't have gvregs anymore.
2481 if (isFunctionSelected) {
2482 report(msg: "Generic virtual register invalid in a Selected function",
2483 MO, MONum);
2484 return;
2485 }
2486
2487 // The gvreg must have a type and it must not have a SubIdx.
2488 LLT Ty = MRI->getType(Reg);
2489 if (!Ty.isValid()) {
2490 report(msg: "Generic virtual register must have a valid type", MO,
2491 MONum);
2492 return;
2493 }
2494
2495 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2496 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2497
2498 // If we're post-RegBankSelect, the gvreg must have a bank.
2499 if (!RegBank && isFunctionRegBankSelected) {
2500 report(msg: "Generic virtual register must have a bank in a "
2501 "RegBankSelected function",
2502 MO, MONum);
2503 return;
2504 }
2505
2506 // Make sure the register fits into its register bank if any.
2507 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2508 RBI->getMaximumSize(RegBankID: RegBank->getID()) < Ty.getSizeInBits()) {
2509 report(msg: "Register bank is too small for virtual register", MO,
2510 MONum);
2511 errs() << "Register bank " << RegBank->getName() << " too small("
2512 << RBI->getMaximumSize(RegBankID: RegBank->getID()) << ") to fit "
2513 << Ty.getSizeInBits() << "-bits\n";
2514 return;
2515 }
2516 }
2517
2518 if (SubIdx) {
2519 report(msg: "Generic virtual register does not allow subregister index", MO,
2520 MONum);
2521 return;
2522 }
2523
2524 // If this is a target specific instruction and this operand
2525 // has register class constraint, the virtual register must
2526 // comply to it.
2527 if (!isPreISelGenericOpcode(Opcode: MCID.getOpcode()) &&
2528 MONum < MCID.getNumOperands() &&
2529 TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF)) {
2530 report(msg: "Virtual register does not match instruction constraint", MO,
2531 MONum);
2532 errs() << "Expect register class "
2533 << TRI->getRegClassName(
2534 Class: TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF))
2535 << " but got nothing\n";
2536 return;
2537 }
2538
2539 break;
2540 }
2541 if (SubIdx) {
2542 const TargetRegisterClass *SRC =
2543 TRI->getSubClassWithSubReg(RC, Idx: SubIdx);
2544 if (!SRC) {
2545 report(msg: "Invalid subregister index for virtual register", MO, MONum);
2546 errs() << "Register class " << TRI->getRegClassName(Class: RC)
2547 << " does not support subreg index " << SubIdx << "\n";
2548 return;
2549 }
2550 if (RC != SRC) {
2551 report(msg: "Invalid register class for subregister index", MO, MONum);
2552 errs() << "Register class " << TRI->getRegClassName(Class: RC)
2553 << " does not fully support subreg index " << SubIdx << "\n";
2554 return;
2555 }
2556 }
2557 if (MONum < MCID.getNumOperands()) {
2558 if (const TargetRegisterClass *DRC =
2559 TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF)) {
2560 if (SubIdx) {
2561 const TargetRegisterClass *SuperRC =
2562 TRI->getLargestLegalSuperClass(RC, *MF);
2563 if (!SuperRC) {
2564 report(msg: "No largest legal super class exists.", MO, MONum);
2565 return;
2566 }
2567 DRC = TRI->getMatchingSuperRegClass(A: SuperRC, B: DRC, Idx: SubIdx);
2568 if (!DRC) {
2569 report(msg: "No matching super-reg register class.", MO, MONum);
2570 return;
2571 }
2572 }
2573 if (!RC->hasSuperClassEq(RC: DRC)) {
2574 report(msg: "Illegal virtual register for instruction", MO, MONum);
2575 errs() << "Expected a " << TRI->getRegClassName(Class: DRC)
2576 << " register, but got a " << TRI->getRegClassName(Class: RC)
2577 << " register\n";
2578 }
2579 }
2580 }
2581 }
2582 break;
2583 }
2584
2585 case MachineOperand::MO_RegisterMask:
2586 regMasks.push_back(Elt: MO->getRegMask());
2587 break;
2588
2589 case MachineOperand::MO_MachineBasicBlock:
2590 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MBB: MI->getParent()))
2591 report(msg: "PHI operand is not in the CFG", MO, MONum);
2592 break;
2593
2594 case MachineOperand::MO_FrameIndex:
2595 if (LiveStks && LiveStks->hasInterval(Slot: MO->getIndex()) &&
2596 LiveInts && !LiveInts->isNotInMIMap(Instr: *MI)) {
2597 int FI = MO->getIndex();
2598 LiveInterval &LI = LiveStks->getInterval(Slot: FI);
2599 SlotIndex Idx = LiveInts->getInstructionIndex(Instr: *MI);
2600
2601 bool stores = MI->mayStore();
2602 bool loads = MI->mayLoad();
2603 // For a memory-to-memory move, we need to check if the frame
2604 // index is used for storing or loading, by inspecting the
2605 // memory operands.
2606 if (stores && loads) {
2607 for (auto *MMO : MI->memoperands()) {
2608 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2609 if (PSV == nullptr) continue;
2610 const FixedStackPseudoSourceValue *Value =
2611 dyn_cast<FixedStackPseudoSourceValue>(Val: PSV);
2612 if (Value == nullptr) continue;
2613 if (Value->getFrameIndex() != FI) continue;
2614
2615 if (MMO->isStore())
2616 loads = false;
2617 else
2618 stores = false;
2619 break;
2620 }
2621 if (loads == stores)
2622 report(msg: "Missing fixed stack memoperand.", MI);
2623 }
2624 if (loads && !LI.liveAt(index: Idx.getRegSlot(EC: true))) {
2625 report(msg: "Instruction loads from dead spill slot", MO, MONum);
2626 errs() << "Live stack: " << LI << '\n';
2627 }
2628 if (stores && !LI.liveAt(index: Idx.getRegSlot())) {
2629 report(msg: "Instruction stores to dead spill slot", MO, MONum);
2630 errs() << "Live stack: " << LI << '\n';
2631 }
2632 }
2633 break;
2634
2635 case MachineOperand::MO_CFIIndex:
2636 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2637 report(msg: "CFI instruction has invalid index", MO, MONum);
2638 break;
2639
2640 default:
2641 break;
2642 }
2643}
2644
2645void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2646 unsigned MONum, SlotIndex UseIdx,
2647 const LiveRange &LR,
2648 Register VRegOrUnit,
2649 LaneBitmask LaneMask) {
2650 const MachineInstr *MI = MO->getParent();
2651 LiveQueryResult LRQ = LR.Query(Idx: UseIdx);
2652 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2653 // Check if we have a segment at the use, note however that we only need one
2654 // live subregister range, the others may be dead.
2655 if (!HasValue && LaneMask.none()) {
2656 report(msg: "No live segment at use", MO, MONum);
2657 report_context_liverange(LR);
2658 report_context_vreg_regunit(VRegOrUnit);
2659 report_context(Pos: UseIdx);
2660 }
2661 if (MO->isKill() && !LRQ.isKill()) {
2662 report(msg: "Live range continues after kill flag", MO, MONum);
2663 report_context_liverange(LR);
2664 report_context_vreg_regunit(VRegOrUnit);
2665 if (LaneMask.any())
2666 report_context_lanemask(LaneMask);
2667 report_context(Pos: UseIdx);
2668 }
2669}
2670
2671void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2672 unsigned MONum, SlotIndex DefIdx,
2673 const LiveRange &LR,
2674 Register VRegOrUnit,
2675 bool SubRangeCheck,
2676 LaneBitmask LaneMask) {
2677 if (const VNInfo *VNI = LR.getVNInfoAt(Idx: DefIdx)) {
2678 // The LR can correspond to the whole reg, so its def slot is not obliged
2679 // to be the same as this MO's def slot. E.g. the MO checked here may be a
2680 // "normal" subreg MO while another early-clobber subreg MO in the same
2681 // instruction gives the whole reg an early-clobber def slot that differs
2682 // from the def slot of the MO currently being checked. For example:
2683 // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2684 // The check that there is an early-clobber def of the same superregister
2685 // somewhere is performed in visitMachineFunctionAfter().
2686 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2687 !SlotIndex::isSameInstr(A: VNI->def, B: DefIdx) ||
2688 (VNI->def != DefIdx &&
2689 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2690 report(msg: "Inconsistent valno->def", MO, MONum);
2691 report_context_liverange(LR);
2692 report_context_vreg_regunit(VRegOrUnit);
2693 if (LaneMask.any())
2694 report_context_lanemask(LaneMask);
2695 report_context(VNI: *VNI);
2696 report_context(Pos: DefIdx);
2697 }
2698 } else {
2699 report(msg: "No live segment at def", MO, MONum);
2700 report_context_liverange(LR);
2701 report_context_vreg_regunit(VRegOrUnit);
2702 if (LaneMask.any())
2703 report_context_lanemask(LaneMask);
2704 report_context(Pos: DefIdx);
2705 }
2706 // Check that, if the dead def flag is present, LiveInts agree.
2707 if (MO->isDead()) {
2708 LiveQueryResult LRQ = LR.Query(Idx: DefIdx);
2709 if (!LRQ.isDeadDef()) {
2710 assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2711 // A dead subreg def only tells us that the specific subreg is dead. There
2712 // could be other non-dead defs of other subregs, or we could have other
2713 // parts of the register being live through the instruction. So unless we
2714 // are checking liveness for a subrange it is ok for the live range to
2715 // continue, given that we have a dead def of a subregister.
2716 if (SubRangeCheck || MO->getSubReg() == 0) {
2717 report(msg: "Live range continues after dead def flag", MO, MONum);
2718 report_context_liverange(LR);
2719 report_context_vreg_regunit(VRegOrUnit);
2720 if (LaneMask.any())
2721 report_context_lanemask(LaneMask);
2722 }
2723 }
2724 }
2725}
2726
2727void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2728 const MachineInstr *MI = MO->getParent();
2729 const Register Reg = MO->getReg();
2730 const unsigned SubRegIdx = MO->getSubReg();
2731
2732 const LiveInterval *LI = nullptr;
2733 if (LiveInts && Reg.isVirtual()) {
2734 if (LiveInts->hasInterval(Reg)) {
2735 LI = &LiveInts->getInterval(Reg);
2736 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2737 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(VReg: Reg))
2738 report(msg: "Live interval for subreg operand has no subranges", MO, MONum);
2739 } else {
2740 report(msg: "Virtual register has no live interval", MO, MONum);
2741 }
2742 }
2743
2744 // Both use and def operands can read a register.
2745 if (MO->readsReg()) {
2746 if (MO->isKill())
2747 addRegWithSubRegs(RV&: regsKilled, Reg);
2748
2749 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2750 // which case we have already checked that LiveVars knows any kills on the
2751 // bundle header instead).
2752 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2753 !MI->isBundledWithPred()) {
2754 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2755 if (!is_contained(Range&: VI.Kills, Element: MI))
2756 report(msg: "Kill missing from LiveVariables", MO, MONum);
2757 }
2758
2759 // Check LiveInts liveness and kill.
2760 if (LiveInts && !LiveInts->isNotInMIMap(Instr: *MI)) {
2761 SlotIndex UseIdx;
2762 if (MI->isPHI()) {
2763 // PHI use occurs on the edge, so check for live out here instead.
2764 UseIdx = LiveInts->getMBBEndIdx(
2765 mbb: MI->getOperand(i: MONum + 1).getMBB()).getPrevSlot();
2766 } else {
2767 UseIdx = LiveInts->getInstructionIndex(Instr: *MI);
2768 }
2769 // Check the cached regunit intervals.
2770 if (Reg.isPhysical() && !isReserved(Reg)) {
2771 for (MCRegUnit Unit : TRI->regunits(Reg: Reg.asMCReg())) {
2772 if (MRI->isReservedRegUnit(Unit))
2773 continue;
2774 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2775 checkLivenessAtUse(MO, MONum, UseIdx, LR: *LR, VRegOrUnit: Unit);
2776 }
2777 }
2778
2779 if (Reg.isVirtual()) {
2780 // This is a virtual register interval.
2781 checkLivenessAtUse(MO, MONum, UseIdx, LR: *LI, VRegOrUnit: Reg);
2782
2783 if (LI->hasSubRanges() && !MO->isDef()) {
2784 LaneBitmask MOMask = SubRegIdx != 0
2785 ? TRI->getSubRegIndexLaneMask(SubIdx: SubRegIdx)
2786 : MRI->getMaxLaneMaskForVReg(Reg);
2787 LaneBitmask LiveInMask;
2788 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2789 if ((MOMask & SR.LaneMask).none())
2790 continue;
2791 checkLivenessAtUse(MO, MONum, UseIdx, LR: SR, VRegOrUnit: Reg, LaneMask: SR.LaneMask);
2792 LiveQueryResult LRQ = SR.Query(Idx: UseIdx);
2793 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
2794 LiveInMask |= SR.LaneMask;
2795 }
2796 // At least part of the register has to be live at the use.
2797 if ((LiveInMask & MOMask).none()) {
2798 report(msg: "No live subrange at use", MO, MONum);
2799 report_context(LI: *LI);
2800 report_context(Pos: UseIdx);
2801 }
2802 // For PHIs all lanes should be live
2803 if (MI->isPHI() && LiveInMask != MOMask) {
2804 report(msg: "Not all lanes of PHI source live at use", MO, MONum);
2805 report_context(LI: *LI);
2806 report_context(Pos: UseIdx);
2807 }
2808 }
2809 }
2810 }
2811
2812 // Use of a dead register.
2813 if (!regsLive.count(V: Reg)) {
2814 if (Reg.isPhysical()) {
2815 // Reserved registers may be used even when 'dead'.
2816 bool Bad = !isReserved(Reg);
2817 // We are fine if just any subregister has a defined value.
2818 if (Bad) {
2819
2820 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2821 if (regsLive.count(V: SubReg)) {
2822 Bad = false;
2823 break;
2824 }
2825 }
2826 }
2827 // If there is an additional implicit-use of a super register we stop
2828 // here. By definition we are fine if the super register is not
2829 // (completely) dead; if the complete super register is dead we will
2830 // get a report for its operand.
2831 if (Bad) {
2832 for (const MachineOperand &MOP : MI->uses()) {
2833 if (!MOP.isReg() || !MOP.isImplicit())
2834 continue;
2835
2836 if (!MOP.getReg().isPhysical())
2837 continue;
2838
2839 if (llvm::is_contained(Range: TRI->subregs(Reg: MOP.getReg()), Element: Reg))
2840 Bad = false;
2841 }
2842 }
2843 if (Bad)
2844 report(msg: "Using an undefined physical register", MO, MONum);
2845 } else if (MRI->def_empty(RegNo: Reg)) {
2846 report(msg: "Reading virtual register without a def", MO, MONum);
2847 } else {
2848 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2849 // We don't know which virtual registers are live in, so only complain
2850 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2851 // must be live in. PHI instructions are handled separately.
2852 if (MInfo.regsKilled.count(V: Reg))
2853 report(msg: "Using a killed virtual register", MO, MONum);
2854 else if (!MI->isPHI())
2855 MInfo.vregsLiveIn.insert(KV: std::make_pair(x: Reg, y&: MI));
2856 }
2857 }
2858 }
2859
2860 if (MO->isDef()) {
2861 // Register defined.
2862 // TODO: verify that earlyclobber ops are not used.
2863 if (MO->isDead())
2864 addRegWithSubRegs(RV&: regsDead, Reg);
2865 else
2866 addRegWithSubRegs(RV&: regsDefined, Reg);
2867
2868 // Verify SSA form.
2869 if (MRI->isSSA() && Reg.isVirtual() &&
2870 std::next(x: MRI->def_begin(RegNo: Reg)) != MRI->def_end())
2871 report(msg: "Multiple virtual register defs in SSA form", MO, MONum);
2872
2873 // Check LiveInts for a live segment, but only for virtual registers.
2874 if (LiveInts && !LiveInts->isNotInMIMap(Instr: *MI)) {
2875 SlotIndex DefIdx = LiveInts->getInstructionIndex(Instr: *MI);
2876 DefIdx = DefIdx.getRegSlot(EC: MO->isEarlyClobber());
2877
2878 if (Reg.isVirtual()) {
2879 checkLivenessAtDef(MO, MONum, DefIdx, LR: *LI, VRegOrUnit: Reg);
2880
2881 if (LI->hasSubRanges()) {
2882 LaneBitmask MOMask = SubRegIdx != 0
2883 ? TRI->getSubRegIndexLaneMask(SubIdx: SubRegIdx)
2884 : MRI->getMaxLaneMaskForVReg(Reg);
2885 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2886 if ((SR.LaneMask & MOMask).none())
2887 continue;
2888 checkLivenessAtDef(MO, MONum, DefIdx, LR: SR, VRegOrUnit: Reg, SubRangeCheck: true, LaneMask: SR.LaneMask);
2889 }
2890 }
2891 }
2892 }
2893 }
2894}
2895
2896// This function gets called after visiting all instructions in a bundle. The
2897// argument points to the bundle header.
2898// Normal stand-alone instructions are also considered 'bundles', and this
2899// function is called for all of them.
2900void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2901 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2902 set_union(S1&: MInfo.regsKilled, S2: regsKilled);
2903 set_subtract(S1&: regsLive, S2: regsKilled); regsKilled.clear();
2904 // Kill any masked registers.
2905 while (!regMasks.empty()) {
2906 const uint32_t *Mask = regMasks.pop_back_val();
2907 for (Register Reg : regsLive)
2908 if (Reg.isPhysical() &&
2909 MachineOperand::clobbersPhysReg(RegMask: Mask, PhysReg: Reg.asMCReg()))
2910 regsDead.push_back(Elt: Reg);
2911 }
2912 set_subtract(S1&: regsLive, S2: regsDead); regsDead.clear();
2913 set_union(S1&: regsLive, S2: regsDefined); regsDefined.clear();
2914}
2915
2916void
2917MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2918 MBBInfoMap[MBB].regsLiveOut = regsLive;
2919 regsLive.clear();
2920
2921 if (Indexes) {
2922 SlotIndex stop = Indexes->getMBBEndIdx(mbb: MBB);
2923 if (!(stop > lastIndex)) {
2924 report(msg: "Block ends before last instruction index", MBB);
2925 errs() << "Block ends at " << stop
2926 << " last instruction was at " << lastIndex << '\n';
2927 }
2928 lastIndex = stop;
2929 }
2930}
2931
2932namespace {
2933// This implements a set of registers that serves as a filter: can filter other
2934// sets by passing through elements not in the filter and blocking those that
2935// are. Any filter implicitly includes the full set of physical registers upon
2936// creation, thus filtering them all out. The filter itself as a set only grows,
2937// and needs to be as efficient as possible.
2938struct VRegFilter {
2939 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2940 // no duplicates. Both virtual and physical registers are fine.
2941 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2942 SmallVector<Register, 0> VRegsBuffer;
2943 filterAndAdd(FromRegSet, VRegsBuffer);
2944 }
2945 // Filter \p FromRegSet through the filter and append passed elements into \p
2946 // ToVRegs. All elements appended are then added to the filter itself.
2947 // \returns true if anything changed.
2948 template <typename RegSetT>
2949 bool filterAndAdd(const RegSetT &FromRegSet,
2950 SmallVectorImpl<Register> &ToVRegs) {
2951 unsigned SparseUniverse = Sparse.size();
2952 unsigned NewSparseUniverse = SparseUniverse;
2953 unsigned NewDenseSize = Dense.size();
2954 size_t Begin = ToVRegs.size();
2955 for (Register Reg : FromRegSet) {
2956 if (!Reg.isVirtual())
2957 continue;
2958 unsigned Index = Register::virtReg2Index(Reg);
2959 if (Index < SparseUniverseMax) {
2960 if (Index < SparseUniverse && Sparse.test(Idx: Index))
2961 continue;
2962 NewSparseUniverse = std::max(a: NewSparseUniverse, b: Index + 1);
2963 } else {
2964 if (Dense.count(V: Reg))
2965 continue;
2966 ++NewDenseSize;
2967 }
2968 ToVRegs.push_back(Elt: Reg);
2969 }
2970 size_t End = ToVRegs.size();
2971 if (Begin == End)
2972 return false;
2973 // Reserving space in sets once performs better than doing so continuously
2974 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2975 // tuned all the way down) and double iteration (the second one is over a
2976 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2977 Sparse.resize(N: NewSparseUniverse);
2978 Dense.reserve(Size: NewDenseSize);
2979 for (unsigned I = Begin; I < End; ++I) {
2980 Register Reg = ToVRegs[I];
2981 unsigned Index = Register::virtReg2Index(Reg);
2982 if (Index < SparseUniverseMax)
2983 Sparse.set(Index);
2984 else
2985 Dense.insert(V: Reg);
2986 }
2987 return true;
2988 }
2989
2990private:
2991 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2992 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
2993 // are tracked by Dense. The only purpose of the threshold and the Dense set
2994 // is to have a reasonably growing memory usage in pathological cases (large
2995 // number of very sparse VRegFilter instances live at the same time). In
2996 // practice even in the worst-by-execution-time cases having all elements
2997 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2998 // space efficient than if tracked by Dense. The threshold is set to keep the
2999 // worst-case memory usage within 2x of figures determined empirically for
3000 // "all Dense" scenario in such worst-by-execution-time cases.
3001 BitVector Sparse;
3002 DenseSet<unsigned> Dense;
3003};
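
// Illustrative sketch of the intended usage (the variable names below are
// hypothetical, not part of this file): seed the filter once, then repeatedly
// pass candidate sets through it; only never-before-seen virtual registers
// come out, and they are absorbed into the filter on the way.
//
//   VRegFilter Filter;
//   Filter.add(BlockLiveIns);            // block these from now on
//   SmallVector<Register, 0> Fresh;
//   if (Filter.filterAndAdd(PredLiveOuts, Fresh)) {
//     // Fresh holds the virtual registers from PredLiveOuts that were
//     // neither physical nor already in the filter; physregs never pass.
//   }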

// Implements both a transfer function and a (binary, in-place) join operator
// for a dataflow over register sets with set union join and filtering transfer
// (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time.
// Maintains out_b as its state, allowing for O(n) iteration over it at any
// time, where n is the size of the set (as opposed to O(U) where U is the
// universe). filter_b implicitly contains all physical registers at all times.
class FilteringVRegSet {
  VRegFilter Filter;
  SmallVector<Register, 0> VRegs;

public:
  // Set up the filter_b. \pre Input register set \p RS must have no
  // duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void addToFilter(const RegSetT &RS) {
    Filter.add(RS);
  }
  // Passes \p RS through the filter_b (transfer function) and adds what's left
  // to itself (out_b).
  template <typename RegSetT> bool add(const RegSetT &RS) {
    // Double-duty the Filter: to keep VRegs a set (and the join operation a
    // set union), everything added here is also added to the Filter.
    return Filter.filterAndAdd(RS, VRegs);
  }
  using const_iterator = decltype(VRegs)::const_iterator;
  const_iterator begin() const { return VRegs.begin(); }
  const_iterator end() const { return VRegs.end(); }
  size_t size() const { return VRegs.size(); }
};
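
// A minimal usage sketch (hypothetical variable names): out_b is built by
// filtering each incoming set and unioning the survivors, which is exactly
// the per-block join performed in calcRegsPassed below.
//
//   FilteringVRegSet Out;
//   Out.addToFilter(KilledInBlock);      // filter_b
//   for (const auto &PredSet : PredLiveOutSets)
//     Out.add(PredSet);                  // out_b |= PredSet \ filter_b
//   for (Register R : Out) { /* iterate out_b in O(|out_b|) */ }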
} // namespace

// Calculate the largest possible vregsPassed sets. These are the registers that
// can pass through an MBB live, but may not be live every time. It is assumed
// that all vregsPassed sets are empty before the call.
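// Informally, each block's set is built from its predecessors (a sketch of
// the transfer/join, not literal code):
//
//   vregsPassed(B) = ( U_{P in preds(B)} regsLiveOut(P) U vregsPassed(P) )
//                      \ (regsKilled(B) U regsLiveOut(B))
//
// Blocks are visited in reverse post-order, so on an acyclic CFG every
// predecessor's set is complete before it is consumed.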
void MachineVerifier::calcRegsPassed() {
  if (MF->empty())
    // ReversePostOrderTraversal doesn't handle empty functions.
    return;

  for (const MachineBasicBlock *MB :
       ReversePostOrderTraversal<const MachineFunction *>(MF)) {
    FilteringVRegSet VRegs;
    BBInfo &Info = MBBInfoMap[MB];
    assert(Info.reachable);

    VRegs.addToFilter(Info.regsKilled);
    VRegs.addToFilter(Info.regsLiveOut);
    for (const MachineBasicBlock *Pred : MB->predecessors()) {
      const BBInfo &PredInfo = MBBInfoMap[Pred];
      if (!PredInfo.reachable)
        continue;

      VRegs.add(PredInfo.regsLiveOut);
      VRegs.add(PredInfo.vregsPassed);
    }
    Info.vregsPassed.reserve(VRegs.size());
    Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
  }
}

// Calculate the set of virtual registers that must be passed through each basic
// block in order to satisfy the requirements of successor blocks. This is very
// similar to calcRegsPassed, only backwards.
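// Sketch of the backward propagation (informal, mirrors the code below):
//
//   vregsRequired(P) |= vregsLiveIn(B) U phiUses(B via P)   for P in preds(B)
//
// followed by a worklist loop that keeps pushing vregsRequired across
// predecessor edges until nothing changes.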
void MachineVerifier::calcRegsRequired() {
  // First push live-in regs to predecessors' vregsRequired.
  SmallPtrSet<const MachineBasicBlock*, 8> todo;
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      BBInfo &PInfo = MBBInfoMap[Pred];
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(Pred);
    }

    // Handle the PHI nodes.
    for (const MachineInstr &MI : MBB.phis()) {
      for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip operands that are not registers or that do not read the
        // register (e.g. undef uses).
        if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
          continue;

        // Get the register and predecessor for one PHI edge.
        Register Reg = MI.getOperand(i).getReg();
        const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();

        BBInfo &PInfo = MBBInfoMap[Pred];
        if (PInfo.addRequired(Reg))
          todo.insert(Pred);
      }
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Pred == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[Pred];
      if (SInfo.addRequired(MInfo.vregsRequired))
        todo.insert(Pred);
    }
  }
}

// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
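// For reference, a well-formed machine PHI looks like (MIR syntax, sketch;
// the register class is target-specific):
//
//   %dst:gpr32 = PHI %a, %bb.1, %b, %bb.2
//
// i.e. a virtual register def in operand 0, followed by (register, MBB)
// pairs, one pair per CFG predecessor; the checks below enforce exactly this
// shape and that each incoming value is live-out of its predecessor.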
void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
  BBInfo &MInfo = MBBInfoMap[&MBB];

  SmallPtrSet<const MachineBasicBlock*, 8> seen;
  for (const MachineInstr &Phi : MBB) {
    if (!Phi.isPHI())
      break;
    seen.clear();

    const MachineOperand &MODef = Phi.getOperand(0);
    if (!MODef.isReg() || !MODef.isDef()) {
      report("Expected first PHI operand to be a register def", &MODef, 0);
      continue;
    }
    if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
        MODef.isEarlyClobber() || MODef.isDebug())
      report("Unexpected flag on PHI operand", &MODef, 0);
    Register DefReg = MODef.getReg();
    if (!DefReg.isVirtual())
      report("Expected first PHI operand to be a virtual register", &MODef, 0);

    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          errs() << printMBBReference(*Pred)
                 << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}

static void
verifyConvergenceControl(const MachineFunction &MF, MachineDomTree &DT,
                         std::function<void(const Twine &Message)> FailureCB) {
  MachineConvergenceVerifier CV;
  CV.initialize(&errs(), FailureCB, MF);

  for (const auto &MBB : MF) {
    CV.visit(MBB);
    for (const auto &MI : MBB.instrs())
      CV.visit(MI);
  }

  if (CV.sawTokens()) {
    DT.recalculate(const_cast<MachineFunction &>(MF));
    CV.verify(DT);
  }
}

void MachineVerifier::visitMachineFunctionAfter() {
  auto FailureCB = [this](const Twine &Message) {
    report(Message.str().c_str(), MF);
  };
  verifyConvergenceControl(*MF, DT, FailureCB);

  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Now check liveness info if available
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        errs() << "Virtual register " << printReg(VReg)
               << " is used after the block.\n";
      }
  }

  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check the live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since
  // this must come from a definition in the predecessor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable, and are not
  // reserved, e.g. a condition code register.
  if (MRI->tracksLiveness())
    for (const auto &MBB : *MF)
      for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
        MCPhysReg LiveInReg = P.PhysReg;
        bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
        if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
          continue;
        for (const MachineBasicBlock *Pred : MBB.predecessors()) {
          BBInfo &PInfo = MBBInfoMap[Pred];
          if (!PInfo.regsLiveOut.count(LiveInReg)) {
            report("Live in register not found to be live out from predecessor.",
                   &MBB);
            errs() << TRI->getName(LiveInReg)
                   << " not found to be live out from "
                   << printMBBReference(*Pred) << "\n";
          }
        }
      }

  for (auto CSInfo : MF->getCallSitesInfo())
    if (!CSInfo.first->isCall())
      report("Call site info referencing instruction that is not call", MF);

  // If there's debug-info, check that we don't have any duplicate value
  // tracking numbers.
  if (MF->getFunction().getSubprogram()) {
    DenseSet<unsigned> SeenNumbers;
    for (const auto &MBB : *MF) {
      for (const auto &MI : MBB) {
        if (auto Num = MI.peekDebugInstrNum()) {
          auto Result = SeenNumbers.insert((unsigned)Num);
          if (!Result.second)
            report("Instruction has a duplicated value tracking number", &MI);
        }
      }
    }
  }
}

void MachineVerifier::verifyLiveVariables() {
  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
    for (const auto &MBB : *MF) {
      BBInfo &MInfo = MBBInfoMap[&MBB];

      // Our vregsRequired should be identical to LiveVariables' AliveBlocks.
      if (MInfo.vregsRequired.count(Reg)) {
        if (!VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block missing from AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " must be live through the block.\n";
        }
      } else {
        if (VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block should not be in AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " is not needed live through the block.\n";
        }
      }
    }
  }
}

void MachineVerifier::verifyLiveIntervals() {
  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);

    // Spilling and splitting may leave unused registers around. Skip them.
    if (MRI->reg_nodbg_empty(Reg))
      continue;

    if (!LiveInts->hasInterval(Reg)) {
      report("Missing live interval for virtual register", MF);
      errs() << printReg(Reg, TRI) << " still has defs or uses\n";
      continue;
    }

    const LiveInterval &LI = LiveInts->getInterval(Reg);
    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
    verifyLiveInterval(LI);
  }

  // Verify all the cached regunit intervals.
  for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
      verifyLiveRange(*LR, i);
}

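// A note on slot indexes for the range checks below: each instruction owns
// four consecutive slots, ordered Block (B) < EarlyClobber (e) < Register (r)
// < Dead (d). Normal defs start at their r slot, early-clobber defs at their
// e slot, and a fully dead def's segment ends at the d slot of the same
// instruction, e.g. [r,d).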
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}

void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             const LiveRange::const_iterator I,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // Checks for non-live-out segments.
  if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
    // RegUnit intervals are allowed to have dead PHIs.
    if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
        S.end == VNI->def.getDeadSlot())
      return;

    // The live segment is ending inside EndMBB.
    const MachineInstr *MI =
        LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
    if (!MI) {
      report("Live segment doesn't end at a valid instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
      return;
    }

    // The block slot must refer to a basic block boundary.
    if (S.end.isBlock()) {
      report("Live segment ends at B slot of an instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }

    if (S.end.isDead()) {
      // A segment can only end at the dead slot if there is a dead def in
      // the same instruction, so the segment must not span instructions.
      if (!SlotIndex::isSameInstr(S.start, S.end)) {
        report("Live segment ending at dead slot spans instructions", EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // After tied operands are rewritten, a live segment can only end at an
    // early-clobber slot if it is being redefined by an early-clobber def.
    // TODO: Before tied operands are rewritten, a live segment can only end at
    // an early-clobber slot if the last use is tied to an early-clobber def.
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::TiedOpsRewritten) &&
        S.end.isEarlyClobber()) {
      if (I + 1 == LR.end() || (I + 1)->start != S.end) {
        report("Live segment ending at early clobber slot must be "
               "redefined by an EC def in the same instruction",
               EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // The following checks only apply to virtual registers. Physreg liveness
    // is too weird to check.
    if (Reg.isVirtual()) {
      // A live segment can end with either a redefinition, a kill flag on a
      // use, or a dead flag on a def.
      bool hasRead = false;
      bool hasSubRegDef = false;
      bool hasDeadDef = false;
      for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
        if (!MOI->isReg() || MOI->getReg() != Reg)
          continue;
        unsigned Sub = MOI->getSubReg();
        LaneBitmask SLM =
            Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
        if (MOI->isDef()) {
          if (Sub != 0) {
            hasSubRegDef = true;
            // An operand %0:sub0 reads %0:sub1..n. Invert the lane
            // mask for subregister defs. Read-undef defs will be handled by
            // readsReg below.
            SLM = ~SLM;
          }
          if (MOI->isDead())
            hasDeadDef = true;
        }
        if (LaneMask.any() && (LaneMask & SLM).none())
          continue;
        if (MOI->readsReg())
          hasRead = true;
      }
      if (S.end.isDead()) {
        // Make sure that the corresponding machine operand for a "dead" live
        // range has the dead flag. We cannot perform this check for subregister
        // liveranges as partially dead values are allowed.
        if (LaneMask.none() && !hasDeadDef) {
          report(
              "Instruction ending live segment on dead slot has no dead flag",
              MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      } else {
        if (!hasRead) {
          // When tracking subregister liveness, the main range must start new
          // values on partial register writes, even if there is no read.
          if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
              !hasSubRegDef) {
            report("Instruction ending live segment doesn't read the register",
                   MI);
            report_context(LR, Reg, LaneMask);
            report_context(S);
          }
        }
      }
    }
  }

  // Now check all the basic blocks in this live segment.
  MachineFunction::const_iterator MFI = MBB->getIterator();
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

  SmallVector<SlotIndex, 4> Undefs;
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Reg.isVirtual() && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
                 VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // Predecessor of landing pad live-out on last call.
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However, for a phi
      // instruction with subregister intervals, only one of the subregisters
      // (not necessarily the current one) needs to be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}

void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
                                      LaneBitmask LaneMask) {
  for (const VNInfo *VNI : LR.valnos)
    verifyLiveRangeValue(LR, VNI, Reg, LaneMask);

  for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
    verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}

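// A sketch of the subrange invariants verifyLiveInterval checks below, for a
// hypothetical vreg %0 with two subregisters (segment bounds made up):
//
//   %0       main range:           [16r,64r)
//   %0.sub0  subrange with mask A: [16r,32r)
//   %0.sub1  subrange with mask B: [16r,64r)
//
// Masks A and B must be disjoint subsets of getMaxLaneMaskForVReg(%0), no
// subrange may be empty, and every subrange must be covered by the main range.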
void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Reg.isVirtual());
  verifyLiveRange(LI, Reg);

  if (LI.hasSubRanges()) {
    LaneBitmask Mask;
    LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
    for (const LiveInterval::SubRange &SR : LI.subranges()) {
      if ((Mask & SR.LaneMask).any()) {
        report("Lane masks of sub ranges overlap in live interval", MF);
        report_context(LI);
      }
      if ((SR.LaneMask & ~MaxMask).any()) {
        report("Subrange lanemask is invalid", MF);
        report_context(LI);
      }
      if (SR.empty()) {
        report("Subrange must not be empty", MF);
        report_context(SR, LI.reg(), SR.LaneMask);
      }
      Mask |= SR.LaneMask;
      verifyLiveRange(SR, LI.reg(), SR.LaneMask);
      if (!LI.covers(SR)) {
        report("A Subrange is not covered by the main range", MF);
        report_context(LI);
      }
    }
  }

  // Check that the LI has only one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}

namespace {

  // FrameSetup and FrameDestroy can have zero adjustment, so a single integer
  // cannot tell whether a zero value came from a FrameSetup or a FrameDestroy.
  // We therefore use a bool plus an integer to capture the stack state.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
      EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
      ExitIsSetup(ExitSetup) {}

    // Can be negative, which means we are setting up a frame.
    int EntryValue = 0;
    int ExitValue = 0;
    bool EntryIsSetup = false;
    bool ExitIsSetup = false;
  };

} // end anonymous namespace

/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, stack adjustments are identical on all
/// CFG edges to a merge point, and the frame is destroyed at the end of a
/// return block.
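///
/// For example (a sketch with hypothetical sizes; the frame pseudos are
/// target-specific, e.g. ADJCALLSTACKDOWN/ADJCALLSTACKUP on many targets):
///
///   ADJCALLSTACKDOWN 16   ; FrameSetup:   ExitValue -= 16, ExitIsSetup = true
///   CALL @f
///   ADJCALLSTACKUP 16     ; FrameDestroy: ExitValue += 16, ExitIsSetup = false
///
/// A block ending in a return must reach its end with ExitValue == 0 and
/// ExitIsSetup == false.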
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock*> Reachable;

  // Visit the MBBs in DFS order.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
       DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
      report("Call frame size on entry does not match value computed from "
             "predecessor",
             MBB);
      errs() << "Call frame size on entry " << MBB->getCallFrameSize()
             << " does not match value computed from predecessor "
             << -BBState.EntryValue << '\n';
    }

    // Update stack state by checking contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
          report("AdjustsStack not set in presence of a frame pseudo "
                 "instruction.", &I);
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
                                               BBState.ExitValue;
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
                 << AbsSPAdj << ">.\n";
        }
        if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
          report("AdjustsStack not set in presence of a frame pseudo "
                 "instruction.", &I);
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the entry
    // state.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }

    // Make sure a basic block with return ends with zero stack adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}