1//===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the SelectionDAGISel class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/SelectionDAGISel.h"
14#include "ScheduleDAGSDNodes.h"
15#include "SelectionDAGBuilder.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/PostOrderIterator.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/Analysis/AssumptionCache.h"
26#include "llvm/Analysis/BranchProbabilityInfo.h"
27#include "llvm/Analysis/CFG.h"
28#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
29#include "llvm/Analysis/OptimizationRemarkEmitter.h"
30#include "llvm/Analysis/ProfileSummaryInfo.h"
31#include "llvm/Analysis/TargetLibraryInfo.h"
32#include "llvm/Analysis/TargetTransformInfo.h"
33#include "llvm/Analysis/UniformityAnalysis.h"
34#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
35#include "llvm/CodeGen/CodeGenCommonISel.h"
36#include "llvm/CodeGen/FastISel.h"
37#include "llvm/CodeGen/FunctionLoweringInfo.h"
38#include "llvm/CodeGen/GCMetadata.h"
39#include "llvm/CodeGen/ISDOpcodes.h"
40#include "llvm/CodeGen/MachineBasicBlock.h"
41#include "llvm/CodeGen/MachineFrameInfo.h"
42#include "llvm/CodeGen/MachineFunction.h"
43#include "llvm/CodeGen/MachineFunctionPass.h"
44#include "llvm/CodeGen/MachineInstr.h"
45#include "llvm/CodeGen/MachineInstrBuilder.h"
46#include "llvm/CodeGen/MachineMemOperand.h"
47#include "llvm/CodeGen/MachineModuleInfo.h"
48#include "llvm/CodeGen/MachineOperand.h"
49#include "llvm/CodeGen/MachinePassRegistry.h"
50#include "llvm/CodeGen/MachineRegisterInfo.h"
51#include "llvm/CodeGen/SchedulerRegistry.h"
52#include "llvm/CodeGen/SelectionDAG.h"
53#include "llvm/CodeGen/SelectionDAGNodes.h"
54#include "llvm/CodeGen/StackMaps.h"
55#include "llvm/CodeGen/StackProtector.h"
56#include "llvm/CodeGen/SwiftErrorValueTracking.h"
57#include "llvm/CodeGen/TargetInstrInfo.h"
58#include "llvm/CodeGen/TargetLowering.h"
59#include "llvm/CodeGen/TargetRegisterInfo.h"
60#include "llvm/CodeGen/TargetSubtargetInfo.h"
61#include "llvm/CodeGen/ValueTypes.h"
62#include "llvm/CodeGen/WinEHFuncInfo.h"
63#include "llvm/CodeGenTypes/MachineValueType.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfo.h"
68#include "llvm/IR/DebugInfoMetadata.h"
69#include "llvm/IR/DebugLoc.h"
70#include "llvm/IR/DiagnosticInfo.h"
71#include "llvm/IR/EHPersonalities.h"
72#include "llvm/IR/Function.h"
73#include "llvm/IR/InlineAsm.h"
74#include "llvm/IR/InstIterator.h"
75#include "llvm/IR/Instruction.h"
76#include "llvm/IR/Instructions.h"
77#include "llvm/IR/IntrinsicInst.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/IntrinsicsWebAssembly.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/PrintPasses.h"
82#include "llvm/IR/Statepoint.h"
83#include "llvm/IR/Type.h"
84#include "llvm/IR/User.h"
85#include "llvm/IR/Value.h"
86#include "llvm/InitializePasses.h"
87#include "llvm/MC/MCInstrDesc.h"
88#include "llvm/Pass.h"
89#include "llvm/Support/BranchProbability.h"
90#include "llvm/Support/Casting.h"
91#include "llvm/Support/CodeGen.h"
92#include "llvm/Support/CommandLine.h"
93#include "llvm/Support/Compiler.h"
94#include "llvm/Support/Debug.h"
95#include "llvm/Support/ErrorHandling.h"
96#include "llvm/Support/KnownBits.h"
97#include "llvm/Support/Timer.h"
98#include "llvm/Support/raw_ostream.h"
99#include "llvm/Target/TargetIntrinsicInfo.h"
100#include "llvm/Target/TargetMachine.h"
101#include "llvm/Target/TargetOptions.h"
102#include "llvm/Transforms/Utils/BasicBlockUtils.h"
103#include <algorithm>
104#include <cassert>
105#include <cstdint>
106#include <iterator>
107#include <limits>
108#include <memory>
109#include <optional>
110#include <string>
111#include <utility>
112#include <vector>
113
114using namespace llvm;
115
116#define DEBUG_TYPE "isel"
117#define ISEL_DUMP_DEBUG_TYPE DEBUG_TYPE "-dump"
118
119STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
120STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
121STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
122STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
123STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
124STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
125STATISTIC(NumFastIselFailLowerArguments,
126 "Number of entry blocks where fast isel failed to lower arguments");
127
128static cl::opt<int> EnableFastISelAbort(
129 "fast-isel-abort", cl::Hidden,
130 cl::desc("Enable abort calls when \"fast\" instruction selection "
             "fails to lower an instruction: 0 disables the abort, 1 aborts "
             "for everything but arguments, calls, and terminators, 2 also "
             "aborts for argument lowering, and 3 never falls back "
             "to SelectionDAG."));
135
136static cl::opt<bool> EnableFastISelFallbackReport(
137 "fast-isel-report-on-fallback", cl::Hidden,
138 cl::desc("Emit a diagnostic when \"fast\" instruction selection "
139 "falls back to SelectionDAG."));
140
141static cl::opt<bool>
142UseMBPI("use-mbpi",
143 cl::desc("use Machine Branch Probability Info"),
144 cl::init(Val: true), cl::Hidden);
145
146#ifndef NDEBUG
147static cl::opt<std::string>
148FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
149 cl::desc("Only display the basic block whose name "
150 "matches this for all view-*-dags options"));
151static cl::opt<bool>
152ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
153 cl::desc("Pop up a window to show dags before the first "
154 "dag combine pass"));
155static cl::opt<bool>
156ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
157 cl::desc("Pop up a window to show dags before legalize types"));
158static cl::opt<bool>
159 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
160 cl::desc("Pop up a window to show dags before the post "
161 "legalize types dag combine pass"));
162static cl::opt<bool>
163 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
164 cl::desc("Pop up a window to show dags before legalize"));
165static cl::opt<bool>
166ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
167 cl::desc("Pop up a window to show dags before the second "
168 "dag combine pass"));
169static cl::opt<bool>
170ViewISelDAGs("view-isel-dags", cl::Hidden,
171 cl::desc("Pop up a window to show isel dags as they are selected"));
172static cl::opt<bool>
173ViewSchedDAGs("view-sched-dags", cl::Hidden,
174 cl::desc("Pop up a window to show sched dags as they are processed"));
175static cl::opt<bool>
176ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
177 cl::desc("Pop up a window to show SUnit dags after they are processed"));
178#else
179static const bool ViewDAGCombine1 = false, ViewLegalizeTypesDAGs = false,
180 ViewDAGCombineLT = false, ViewLegalizeDAGs = false,
181 ViewDAGCombine2 = false, ViewISelDAGs = false,
182 ViewSchedDAGs = false, ViewSUnitDAGs = false;
183#endif
184
185#ifndef NDEBUG
186#define ISEL_DUMP(X) \
187 do { \
188 if (llvm::DebugFlag && \
189 (isCurrentDebugType(DEBUG_TYPE) || \
190 (isCurrentDebugType(ISEL_DUMP_DEBUG_TYPE) && MatchFilterFuncName))) { \
191 X; \
192 } \
193 } while (false)
194#else
195#define ISEL_DUMP(X) do { } while (false)
196#endif
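// ISEL_DUMP(X) executes X only when debug output is enabled for "isel", or
// for "isel-dump" when the current function also passes the print-function
// filter (MatchFilterFuncName); in NDEBUG builds X is discarded entirely.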
197
198//===---------------------------------------------------------------------===//
199///
200/// RegisterScheduler class - Track the registration of instruction schedulers.
201///
202//===---------------------------------------------------------------------===//
203MachinePassRegistry<RegisterScheduler::FunctionPassCtor>
204 RegisterScheduler::Registry;
205
206//===---------------------------------------------------------------------===//
207///
208/// ISHeuristic command line option for instruction schedulers.
209///
210//===---------------------------------------------------------------------===//
211static cl::opt<RegisterScheduler::FunctionPassCtor, false,
212 RegisterPassParser<RegisterScheduler>>
213ISHeuristic("pre-RA-sched",
214 cl::init(Val: &createDefaultScheduler), cl::Hidden,
215 cl::desc("Instruction schedulers available (before register"
216 " allocation):"));
217
218static RegisterScheduler
219defaultListDAGScheduler("default", "Best scheduler for the target",
220 createDefaultScheduler);
221
222static bool dontUseFastISelFor(const Function &Fn) {
223 // Don't enable FastISel for functions with swiftasync Arguments.
224 // Debug info on those is reliant on good Argument lowering, and FastISel is
  // not capable of lowering the entire function. Mixing the two selectors
  // tends to result in poor lowering of Arguments.
227 return any_of(Range: Fn.args(), P: [](const Argument &Arg) {
228 return Arg.hasAttribute(Attribute::AttrKind::SwiftAsync);
229 });
230}
231
232namespace llvm {
233
234 //===--------------------------------------------------------------------===//
235 /// This class is used by SelectionDAGISel to temporarily override
236 /// the optimization level on a per-function basis.
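  /// It is used as an RAII helper in runOnMachineFunction(): the constructor
  /// applies the new level (and the matching FastISel setting) and the
  /// destructor restores the saved values if the level was changed.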
237 class OptLevelChanger {
238 SelectionDAGISel &IS;
239 CodeGenOptLevel SavedOptLevel;
240 bool SavedFastISel;
241
242 public:
243 OptLevelChanger(SelectionDAGISel &ISel, CodeGenOptLevel NewOptLevel)
244 : IS(ISel) {
245 SavedOptLevel = IS.OptLevel;
246 SavedFastISel = IS.TM.Options.EnableFastISel;
247 if (NewOptLevel != SavedOptLevel) {
248 IS.OptLevel = NewOptLevel;
249 IS.TM.setOptLevel(NewOptLevel);
250 LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
251 << IS.MF->getFunction().getName() << "\n");
252 LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(SavedOptLevel)
253 << " ; After: -O" << static_cast<int>(NewOptLevel)
254 << "\n");
255 if (NewOptLevel == CodeGenOptLevel::None)
256 IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
257 }
258 if (dontUseFastISelFor(Fn: IS.MF->getFunction()))
259 IS.TM.setFastISel(false);
260 LLVM_DEBUG(
261 dbgs() << "\tFastISel is "
262 << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
263 << "\n");
264 }
265
266 ~OptLevelChanger() {
267 if (IS.OptLevel == SavedOptLevel)
268 return;
269 LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
270 << IS.MF->getFunction().getName() << "\n");
271 LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(IS.OptLevel)
272 << " ; After: -O" << static_cast<int>(SavedOptLevel) << "\n");
273 IS.OptLevel = SavedOptLevel;
274 IS.TM.setOptLevel(SavedOptLevel);
275 IS.TM.setFastISel(SavedFastISel);
276 }
277 };
278
279 //===--------------------------------------------------------------------===//
280 /// createDefaultScheduler - This creates an instruction scheduler appropriate
281 /// for the target.
282 ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
283 CodeGenOptLevel OptLevel) {
284 const TargetLowering *TLI = IS->TLI;
285 const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
286
287 // Try first to see if the Target has its own way of selecting a scheduler
288 if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
289 return SchedulerCtor(IS, OptLevel);
290 }
291
292 if (OptLevel == CodeGenOptLevel::None ||
293 (ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
294 TLI->getSchedulingPreference() == Sched::Source)
295 return createSourceListDAGScheduler(IS, OptLevel);
296 if (TLI->getSchedulingPreference() == Sched::RegPressure)
297 return createBURRListDAGScheduler(IS, OptLevel);
298 if (TLI->getSchedulingPreference() == Sched::Hybrid)
299 return createHybridListDAGScheduler(IS, OptLevel);
300 if (TLI->getSchedulingPreference() == Sched::VLIW)
301 return createVLIWDAGScheduler(IS, OptLevel);
302 if (TLI->getSchedulingPreference() == Sched::Fast)
303 return createFastDAGScheduler(IS, OptLevel);
304 if (TLI->getSchedulingPreference() == Sched::Linearize)
305 return createDAGLinearizer(IS, OptLevel);
306 assert(TLI->getSchedulingPreference() == Sched::ILP &&
307 "Unknown sched type!");
308 return createILPListDAGScheduler(IS, OptLevel);
309 }
310
311} // end namespace llvm
312
313// EmitInstrWithCustomInserter - This method should be implemented by targets
314// that mark instructions with the 'usesCustomInserter' flag. These
315// instructions are special in various ways, which require special support to
316// insert. The specified MachineInstr is created but not inserted into any
317// basic blocks, and this method is called to expand it into a sequence of
318// instructions, potentially also creating new basic blocks and control flow.
319// When new basic blocks are inserted and the edges from MBB to its successors
320// are modified, the method should insert pairs of <OldSucc, NewSucc> into the
321// DenseMap.
322MachineBasicBlock *
323TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
324 MachineBasicBlock *MBB) const {
325#ifndef NDEBUG
326 dbgs() << "If a target marks an instruction with "
327 "'usesCustomInserter', it must implement "
328 "TargetLowering::EmitInstrWithCustomInserter!\n";
329#endif
330 llvm_unreachable(nullptr);
331}
332
333void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
334 SDNode *Node) const {
335 assert(!MI.hasPostISelHook() &&
336 "If a target marks an instruction with 'hasPostISelHook', "
337 "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
338}
339
340//===----------------------------------------------------------------------===//
341// SelectionDAGISel code
342//===----------------------------------------------------------------------===//
343
344SelectionDAGISel::SelectionDAGISel(char &ID, TargetMachine &tm,
345 CodeGenOptLevel OL)
346 : MachineFunctionPass(ID), TM(tm), FuncInfo(new FunctionLoweringInfo()),
347 SwiftError(new SwiftErrorValueTracking()),
348 CurDAG(new SelectionDAG(tm, OL)),
349 SDB(std::make_unique<SelectionDAGBuilder>(args&: *CurDAG, args&: *FuncInfo, args&: *SwiftError,
350 args&: OL)),
351 OptLevel(OL) {
352 initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
353 initializeBranchProbabilityInfoWrapperPassPass(
354 *PassRegistry::getPassRegistry());
355 initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
356 initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
357}
358
359SelectionDAGISel::~SelectionDAGISel() {
360 delete CurDAG;
361 delete SwiftError;
362}
363
364void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
365 if (OptLevel != CodeGenOptLevel::None)
366 AU.addRequired<AAResultsWrapperPass>();
367 AU.addRequired<GCModuleInfo>();
368 AU.addRequired<StackProtector>();
369 AU.addPreserved<GCModuleInfo>();
370 AU.addRequired<TargetLibraryInfoWrapperPass>();
371 AU.addRequired<TargetTransformInfoWrapperPass>();
372 AU.addRequired<AssumptionCacheTracker>();
373 if (UseMBPI && OptLevel != CodeGenOptLevel::None)
374 AU.addRequired<BranchProbabilityInfoWrapperPass>();
375 AU.addRequired<ProfileSummaryInfoWrapperPass>();
376 // AssignmentTrackingAnalysis only runs if assignment tracking is enabled for
377 // the module.
378 AU.addRequired<AssignmentTrackingAnalysis>();
379 AU.addPreserved<AssignmentTrackingAnalysis>();
380 if (OptLevel != CodeGenOptLevel::None)
381 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
382 MachineFunctionPass::getAnalysisUsage(AU);
383}
384
385static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F,
386 MachineModuleInfo &MMI) {
387 // Only needed for MSVC
388 if (!TT.isWindowsMSVCEnvironment())
389 return;
390
391 // If it's already set, nothing to do.
392 if (MMI.usesMSVCFloatingPoint())
393 return;
394
395 for (const Instruction &I : instructions(F)) {
396 if (I.getType()->isFPOrFPVectorTy()) {
397 MMI.setUsesMSVCFloatingPoint(true);
398 return;
399 }
400 for (const auto &Op : I.operands()) {
401 if (Op->getType()->isFPOrFPVectorTy()) {
402 MMI.setUsesMSVCFloatingPoint(true);
403 return;
404 }
405 }
406 }
407}
408
409bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
410 // If we already selected that function, we do not need to run SDISel.
411 if (mf.getProperties().hasProperty(
412 P: MachineFunctionProperties::Property::Selected))
413 return false;
414 // Do some sanity-checking on the command-line options.
415 assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
416 "-fast-isel-abort > 0 requires -fast-isel");
417
418 const Function &Fn = mf.getFunction();
419 MF = &mf;
420
421#ifndef NDEBUG
422 StringRef FuncName = Fn.getName();
423 MatchFilterFuncName = isFunctionInPrintList(FunctionName: FuncName);
424#else
425 (void)MatchFilterFuncName;
426#endif
427
428 // Decide what flavour of variable location debug-info will be used, before
429 // we change the optimisation level.
430 bool InstrRef = mf.shouldUseDebugInstrRef();
431 mf.setUseDebugInstrRef(InstrRef);
432
433 // Reset the target options before resetting the optimization
434 // level below.
435 // FIXME: This is a horrible hack and should be processed via
436 // codegen looking at the optimization level explicitly when
437 // it wants to look at it.
438 TM.resetTargetOptions(F: Fn);
439 // Reset OptLevel to None for optnone functions.
440 CodeGenOptLevel NewOptLevel = OptLevel;
441 if (OptLevel != CodeGenOptLevel::None && skipFunction(F: Fn))
442 NewOptLevel = CodeGenOptLevel::None;
443 OptLevelChanger OLC(*this, NewOptLevel);
444
445 TII = MF->getSubtarget().getInstrInfo();
446 TLI = MF->getSubtarget().getTargetLowering();
447 RegInfo = &MF->getRegInfo();
448 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F: Fn);
449 GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(F: Fn) : nullptr;
450 ORE = std::make_unique<OptimizationRemarkEmitter>(args: &Fn);
451 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F&: mf.getFunction());
452 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
453 BlockFrequencyInfo *BFI = nullptr;
454 if (PSI && PSI->hasProfileSummary() && OptLevel != CodeGenOptLevel::None)
455 BFI = &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
456
457 FunctionVarLocs const *FnVarLocs = nullptr;
458 if (isAssignmentTrackingEnabled(M: *Fn.getParent()))
459 FnVarLocs = getAnalysis<AssignmentTrackingAnalysis>().getResults();
460
461 ISEL_DUMP(dbgs() << "\n\n\n=== " << FuncName << "\n");
462
463 UniformityInfo *UA = nullptr;
464 if (auto *UAPass = getAnalysisIfAvailable<UniformityInfoWrapperPass>())
465 UA = &UAPass->getUniformityInfo();
466 CurDAG->init(NewMF&: *MF, NewORE&: *ORE, PassPtr: this, LibraryInfo: LibInfo, UA, PSIin: PSI, BFIin: BFI, FnVarLocs);
467 FuncInfo->set(Fn, MF&: *MF, DAG: CurDAG);
468 SwiftError->setFunction(*MF);
469
  // Now get the optional analyses if we want to.
471 // This is based on the possibly changed OptLevel (after optnone is taken
472 // into account). That's unfortunate but OK because it just means we won't
473 // ask for passes that have been required anyway.
474
475 if (UseMBPI && OptLevel != CodeGenOptLevel::None)
476 FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
477 else
478 FuncInfo->BPI = nullptr;
479
480 if (OptLevel != CodeGenOptLevel::None)
481 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
482 else
483 AA = nullptr;
484
485 SDB->init(gfi: GFI, AA, AC, li: LibInfo);
486
487 MF->setHasInlineAsm(false);
488
489 FuncInfo->SplitCSR = false;
490
491 // We split CSR if the target supports it for the given function
492 // and the function has only return exits.
493 if (OptLevel != CodeGenOptLevel::None && TLI->supportSplitCSR(MF)) {
494 FuncInfo->SplitCSR = true;
495
496 // Collect all the return blocks.
497 for (const BasicBlock &BB : Fn) {
498 if (!succ_empty(BB: &BB))
499 continue;
500
501 const Instruction *Term = BB.getTerminator();
502 if (isa<UnreachableInst>(Val: Term) || isa<ReturnInst>(Val: Term))
503 continue;
504
      // Bail out if the exit block is neither a Return nor an Unreachable.
506 FuncInfo->SplitCSR = false;
507 break;
508 }
509 }
510
511 MachineBasicBlock *EntryMBB = &MF->front();
512 if (FuncInfo->SplitCSR)
513 // This performs initialization so lowering for SplitCSR will be correct.
514 TLI->initializeSplitCSR(Entry: EntryMBB);
515
516 SelectAllBasicBlocks(Fn);
517 if (FastISelFailed && EnableFastISelFallbackReport) {
518 DiagnosticInfoISelFallback DiagFallback(Fn);
519 Fn.getContext().diagnose(DI: DiagFallback);
520 }
521
522 // Replace forward-declared registers with the registers containing
523 // the desired value.
524 // Note: it is important that this happens **before** the call to
525 // EmitLiveInCopies, since implementations can skip copies of unused
526 // registers. If we don't apply the reg fixups before, some registers may
527 // appear as unused and will be skipped, resulting in bad MI.
528 MachineRegisterInfo &MRI = MF->getRegInfo();
529 for (DenseMap<Register, Register>::iterator I = FuncInfo->RegFixups.begin(),
530 E = FuncInfo->RegFixups.end();
531 I != E; ++I) {
532 Register From = I->first;
533 Register To = I->second;
534 // If To is also scheduled to be replaced, find what its ultimate
535 // replacement is.
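    // For example, with fixups {A -> B, B -> C} the walk below rewrites A's
    // replacement target from B to C before any registers are merged.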
536 while (true) {
537 DenseMap<Register, Register>::iterator J = FuncInfo->RegFixups.find(Val: To);
538 if (J == E)
539 break;
540 To = J->second;
541 }
542 // Make sure the new register has a sufficiently constrained register class.
543 if (From.isVirtual() && To.isVirtual())
544 MRI.constrainRegClass(Reg: To, RC: MRI.getRegClass(Reg: From));
545 // Replace it.
546
547 // Replacing one register with another won't touch the kill flags.
548 // We need to conservatively clear the kill flags as a kill on the old
549 // register might dominate existing uses of the new register.
550 if (!MRI.use_empty(RegNo: To))
551 MRI.clearKillFlags(Reg: From);
552 MRI.replaceRegWith(FromReg: From, ToReg: To);
553 }
554
555 // If the first basic block in the function has live ins that need to be
556 // copied into vregs, emit the copies into the top of the block before
557 // emitting the code for the block.
558 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
559 RegInfo->EmitLiveInCopies(EntryMBB, TRI, TII: *TII);
560
561 // Insert copies in the entry block and the return blocks.
562 if (FuncInfo->SplitCSR) {
563 SmallVector<MachineBasicBlock*, 4> Returns;
564 // Collect all the return blocks.
565 for (MachineBasicBlock &MBB : mf) {
566 if (!MBB.succ_empty())
567 continue;
568
569 MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
570 if (Term != MBB.end() && Term->isReturn()) {
571 Returns.push_back(Elt: &MBB);
572 continue;
573 }
574 }
575 TLI->insertCopiesSplitCSR(Entry: EntryMBB, Exits: Returns);
576 }
577
578 DenseMap<unsigned, unsigned> LiveInMap;
579 if (!FuncInfo->ArgDbgValues.empty())
580 for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
581 if (LI.second)
582 LiveInMap.insert(KV: LI);
583
584 // Insert DBG_VALUE instructions for function arguments to the entry block.
585 for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
586 MachineInstr *MI = FuncInfo->ArgDbgValues[e - i - 1];
587 assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
588 "Function parameters should not be described by DBG_VALUE_LIST.");
589 bool hasFI = MI->getDebugOperand(Index: 0).isFI();
590 Register Reg =
591 hasFI ? TRI.getFrameRegister(MF: *MF) : MI->getDebugOperand(Index: 0).getReg();
592 if (Reg.isPhysical())
593 EntryMBB->insert(I: EntryMBB->begin(), MI);
594 else {
595 MachineInstr *Def = RegInfo->getVRegDef(Reg);
596 if (Def) {
597 MachineBasicBlock::iterator InsertPos = Def;
598 // FIXME: VR def may not be in entry block.
599 Def->getParent()->insert(I: std::next(x: InsertPos), MI);
600 } else
601 LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
602 << Register::virtReg2Index(Reg) << "\n");
603 }
604
    // Don't try to extend through copies in instruction referencing mode.
606 if (InstrRef)
607 continue;
608
609 // If Reg is live-in then update debug info to track its copy in a vreg.
610 DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Val: Reg);
611 if (LDI != LiveInMap.end()) {
612 assert(!hasFI && "There's no handling of frame pointer updating here yet "
613 "- add if needed");
614 MachineInstr *Def = RegInfo->getVRegDef(Reg: LDI->second);
615 MachineBasicBlock::iterator InsertPos = Def;
616 const MDNode *Variable = MI->getDebugVariable();
617 const MDNode *Expr = MI->getDebugExpression();
618 DebugLoc DL = MI->getDebugLoc();
619 bool IsIndirect = MI->isIndirectDebugValue();
620 if (IsIndirect)
621 assert(MI->getDebugOffset().getImm() == 0 &&
622 "DBG_VALUE with nonzero offset");
623 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
624 "Expected inlined-at fields to agree");
625 assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
626 "Didn't expect to see a DBG_VALUE_LIST here");
627 // Def is never a terminator here, so it is ok to increment InsertPos.
628 BuildMI(BB&: *EntryMBB, I: ++InsertPos, DL, MCID: TII->get(Opcode: TargetOpcode::DBG_VALUE),
629 IsIndirect, Reg: LDI->second, Variable, Expr);
630
631 // If this vreg is directly copied into an exported register then
      // that COPY instruction also needs a DBG_VALUE, if it is the only
633 // user of LDI->second.
634 MachineInstr *CopyUseMI = nullptr;
635 for (MachineRegisterInfo::use_instr_iterator
636 UI = RegInfo->use_instr_begin(RegNo: LDI->second),
637 E = RegInfo->use_instr_end(); UI != E; ) {
638 MachineInstr *UseMI = &*(UI++);
639 if (UseMI->isDebugValue()) continue;
640 if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
641 CopyUseMI = UseMI; continue;
642 }
643 // Otherwise this is another use or second copy use.
644 CopyUseMI = nullptr; break;
645 }
646 if (CopyUseMI &&
647 TRI.getRegSizeInBits(Reg: LDI->second, MRI) ==
648 TRI.getRegSizeInBits(Reg: CopyUseMI->getOperand(i: 0).getReg(), MRI)) {
649 // Use MI's debug location, which describes where Variable was
650 // declared, rather than whatever is attached to CopyUseMI.
651 MachineInstr *NewMI =
652 BuildMI(MF&: *MF, DL, MCID: TII->get(Opcode: TargetOpcode::DBG_VALUE), IsIndirect,
653 Reg: CopyUseMI->getOperand(i: 0).getReg(), Variable, Expr);
654 MachineBasicBlock::iterator Pos = CopyUseMI;
655 EntryMBB->insertAfter(I: Pos, MI: NewMI);
656 }
657 }
658 }
659
660 // For debug-info, in instruction referencing mode, we need to perform some
  // post-isel maintenance.
662 if (MF->useDebugInstrRef())
663 MF->finalizeDebugInstrRefs();
664
665 // Determine if there are any calls in this machine function.
666 MachineFrameInfo &MFI = MF->getFrameInfo();
667 for (const auto &MBB : *MF) {
668 if (MFI.hasCalls() && MF->hasInlineAsm())
669 break;
670
671 for (const auto &MI : MBB) {
672 const MCInstrDesc &MCID = TII->get(Opcode: MI.getOpcode());
673 if ((MCID.isCall() && !MCID.isReturn()) ||
674 MI.isStackAligningInlineAsm()) {
675 MFI.setHasCalls(true);
676 }
677 if (MI.isInlineAsm()) {
678 MF->setHasInlineAsm(true);
679 }
680 }
681 }
682
683 // Determine if there is a call to setjmp in the machine function.
684 MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
685
  // Determine if floating point is used for MSVC.
687 computeUsesMSVCFloatingPoint(TT: TM.getTargetTriple(), F: Fn, MMI&: MF->getMMI());
688
689 // Release function-specific state. SDB and CurDAG are already cleared
690 // at this point.
691 FuncInfo->clear();
692
693 ISEL_DUMP(dbgs() << "*** MachineFunction at end of ISel ***\n");
694 ISEL_DUMP(MF->print(dbgs()));
695
696 return true;
697}
698
699static void reportFastISelFailure(MachineFunction &MF,
700 OptimizationRemarkEmitter &ORE,
701 OptimizationRemarkMissed &R,
702 bool ShouldAbort) {
703 // Print the function name explicitly if we don't have a debug location (which
704 // makes the diagnostic less useful) or if we're going to emit a raw error.
705 if (!R.getLocation().isValid() || ShouldAbort)
706 R << (" (in function: " + MF.getName() + ")").str();
707
708 if (ShouldAbort)
709 report_fatal_error(reason: Twine(R.getMsg()));
710
711 ORE.emit(OptDiag&: R);
712 LLVM_DEBUG(dbgs() << R.getMsg() << "\n");
713}
714
715void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
716 BasicBlock::const_iterator End,
717 bool &HadTailCall) {
718 // Allow creating illegal types during DAG building for the basic block.
719 CurDAG->NewNodesMustHaveLegalTypes = false;
720
721 // Lower the instructions. If a call is emitted as a tail call, cease emitting
722 // nodes for this block. If an instruction is elided, don't emit it, but do
723 // handle any debug-info attached to it.
724 for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
725 if (!ElidedArgCopyInstrs.count(Ptr: &*I))
726 SDB->visit(I: *I);
727 else
728 SDB->visitDbgInfo(I: *I);
729 }
730
731 // Make sure the root of the DAG is up-to-date.
732 CurDAG->setRoot(SDB->getControlRoot());
733 HadTailCall = SDB->HasTailCall;
734 SDB->resolveOrClearDbgInfo();
735 SDB->clear();
736
737 // Final step, emit the lowered DAG as machine code.
738 CodeGenAndEmitDAG();
739}
740
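// Walk the DAG along chain operands starting at the root and, for each integer
// value copied into a virtual register, record its known sign bits and known
// bits so that later blocks can reuse the information via FunctionLoweringInfo.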
741void SelectionDAGISel::ComputeLiveOutVRegInfo() {
742 SmallPtrSet<SDNode *, 16> Added;
743 SmallVector<SDNode*, 128> Worklist;
744
745 Worklist.push_back(Elt: CurDAG->getRoot().getNode());
746 Added.insert(Ptr: CurDAG->getRoot().getNode());
747
748 KnownBits Known;
749
750 do {
751 SDNode *N = Worklist.pop_back_val();
752
    // Add all chain operands to the worklist.
754 for (const SDValue &Op : N->op_values())
755 if (Op.getValueType() == MVT::Other && Added.insert(Ptr: Op.getNode()).second)
756 Worklist.push_back(Elt: Op.getNode());
757
758 // If this is a CopyToReg with a vreg dest, process it.
759 if (N->getOpcode() != ISD::CopyToReg)
760 continue;
761
762 unsigned DestReg = cast<RegisterSDNode>(Val: N->getOperand(Num: 1))->getReg();
763 if (!Register::isVirtualRegister(Reg: DestReg))
764 continue;
765
766 // Ignore non-integer values.
767 SDValue Src = N->getOperand(Num: 2);
768 EVT SrcVT = Src.getValueType();
769 if (!SrcVT.isInteger())
770 continue;
771
772 unsigned NumSignBits = CurDAG->ComputeNumSignBits(Op: Src);
773 Known = CurDAG->computeKnownBits(Op: Src);
774 FuncInfo->AddLiveOutRegInfo(Reg: DestReg, NumSignBits, Known);
775 } while (!Worklist.empty());
776}
777
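// Run the per-block SelectionDAG pipeline: DAG combining, type and vector
// legalization (with combines in between), DAG legalization, a final combine,
// instruction selection, scheduling, and emission of the scheduled
// instructions into the current MachineBasicBlock.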
778void SelectionDAGISel::CodeGenAndEmitDAG() {
779 StringRef GroupName = "sdag";
780 StringRef GroupDescription = "Instruction Selection and Scheduling";
781 std::string BlockName;
782 bool MatchFilterBB = false; (void)MatchFilterBB;
783#ifndef NDEBUG
784 TargetTransformInfo &TTI =
785 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F: *FuncInfo->Fn);
786#endif
787
  // Pre-type legalization allows creation of any node type.
789 CurDAG->NewNodesMustHaveLegalTypes = false;
790
791#ifndef NDEBUG
792 MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
793 FilterDAGBasicBlockName ==
794 FuncInfo->MBB->getBasicBlock()->getName());
795#endif
796#ifdef NDEBUG
797 if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewDAGCombineLT ||
798 ViewLegalizeDAGs || ViewDAGCombine2 || ViewISelDAGs || ViewSchedDAGs ||
799 ViewSUnitDAGs)
800#endif
801 {
802 BlockName =
803 (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
804 }
805 ISEL_DUMP(dbgs() << "\nInitial selection DAG: "
806 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
807 << "'\n";
808 CurDAG->dump());
809
810#ifndef NDEBUG
811 if (TTI.hasBranchDivergence())
812 CurDAG->VerifyDAGDivergence();
813#endif
814
815 if (ViewDAGCombine1 && MatchFilterBB)
816 CurDAG->viewGraph(Title: "dag-combine1 input for " + BlockName);
817
818 // Run the DAG combiner in pre-legalize mode.
819 {
820 NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
821 GroupDescription, TimePassesIsEnabled);
822 CurDAG->Combine(Level: BeforeLegalizeTypes, AA, OptLevel);
823 }
824
825 ISEL_DUMP(dbgs() << "\nOptimized lowered selection DAG: "
826 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
827 << "'\n";
828 CurDAG->dump());
829
830#ifndef NDEBUG
831 if (TTI.hasBranchDivergence())
832 CurDAG->VerifyDAGDivergence();
833#endif
834
835 // Second step, hack on the DAG until it only uses operations and types that
836 // the target supports.
837 if (ViewLegalizeTypesDAGs && MatchFilterBB)
838 CurDAG->viewGraph(Title: "legalize-types input for " + BlockName);
839
840 bool Changed;
841 {
842 NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
843 GroupDescription, TimePassesIsEnabled);
844 Changed = CurDAG->LegalizeTypes();
845 }
846
847 ISEL_DUMP(dbgs() << "\nType-legalized selection DAG: "
848 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
849 << "'\n";
850 CurDAG->dump());
851
852#ifndef NDEBUG
853 if (TTI.hasBranchDivergence())
854 CurDAG->VerifyDAGDivergence();
855#endif
856
857 // Only allow creation of legal node types.
858 CurDAG->NewNodesMustHaveLegalTypes = true;
859
860 if (Changed) {
861 if (ViewDAGCombineLT && MatchFilterBB)
862 CurDAG->viewGraph(Title: "dag-combine-lt input for " + BlockName);
863
864 // Run the DAG combiner in post-type-legalize mode.
865 {
866 NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
867 GroupName, GroupDescription, TimePassesIsEnabled);
868 CurDAG->Combine(Level: AfterLegalizeTypes, AA, OptLevel);
869 }
870
871 ISEL_DUMP(dbgs() << "\nOptimized type-legalized selection DAG: "
872 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
873 << "'\n";
874 CurDAG->dump());
875
876#ifndef NDEBUG
877 if (TTI.hasBranchDivergence())
878 CurDAG->VerifyDAGDivergence();
879#endif
880 }
881
882 {
883 NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
884 GroupDescription, TimePassesIsEnabled);
885 Changed = CurDAG->LegalizeVectors();
886 }
887
888 if (Changed) {
889 ISEL_DUMP(dbgs() << "\nVector-legalized selection DAG: "
890 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
891 << "'\n";
892 CurDAG->dump());
893
894#ifndef NDEBUG
895 if (TTI.hasBranchDivergence())
896 CurDAG->VerifyDAGDivergence();
897#endif
898
899 {
900 NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
901 GroupDescription, TimePassesIsEnabled);
902 CurDAG->LegalizeTypes();
903 }
904
905 ISEL_DUMP(dbgs() << "\nVector/type-legalized selection DAG: "
906 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
907 << "'\n";
908 CurDAG->dump());
909
910#ifndef NDEBUG
911 if (TTI.hasBranchDivergence())
912 CurDAG->VerifyDAGDivergence();
913#endif
914
915 if (ViewDAGCombineLT && MatchFilterBB)
916 CurDAG->viewGraph(Title: "dag-combine-lv input for " + BlockName);
917
918 // Run the DAG combiner in post-type-legalize mode.
919 {
920 NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
921 GroupName, GroupDescription, TimePassesIsEnabled);
922 CurDAG->Combine(Level: AfterLegalizeVectorOps, AA, OptLevel);
923 }
924
925 ISEL_DUMP(dbgs() << "\nOptimized vector-legalized selection DAG: "
926 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
927 << "'\n";
928 CurDAG->dump());
929
930#ifndef NDEBUG
931 if (TTI.hasBranchDivergence())
932 CurDAG->VerifyDAGDivergence();
933#endif
934 }
935
936 if (ViewLegalizeDAGs && MatchFilterBB)
937 CurDAG->viewGraph(Title: "legalize input for " + BlockName);
938
939 {
940 NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
941 GroupDescription, TimePassesIsEnabled);
942 CurDAG->Legalize();
943 }
944
945 ISEL_DUMP(dbgs() << "\nLegalized selection DAG: "
946 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
947 << "'\n";
948 CurDAG->dump());
949
950#ifndef NDEBUG
951 if (TTI.hasBranchDivergence())
952 CurDAG->VerifyDAGDivergence();
953#endif
954
955 if (ViewDAGCombine2 && MatchFilterBB)
956 CurDAG->viewGraph(Title: "dag-combine2 input for " + BlockName);
957
958 // Run the DAG combiner in post-legalize mode.
959 {
960 NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
961 GroupDescription, TimePassesIsEnabled);
962 CurDAG->Combine(Level: AfterLegalizeDAG, AA, OptLevel);
963 }
964
965 ISEL_DUMP(dbgs() << "\nOptimized legalized selection DAG: "
966 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
967 << "'\n";
968 CurDAG->dump());
969
970#ifndef NDEBUG
971 if (TTI.hasBranchDivergence())
972 CurDAG->VerifyDAGDivergence();
973#endif
974
975 if (OptLevel != CodeGenOptLevel::None)
976 ComputeLiveOutVRegInfo();
977
978 if (ViewISelDAGs && MatchFilterBB)
979 CurDAG->viewGraph(Title: "isel input for " + BlockName);
980
981 // Third, instruction select all of the operations to machine code, adding the
982 // code to the MachineBasicBlock.
983 {
984 NamedRegionTimer T("isel", "Instruction Selection", GroupName,
985 GroupDescription, TimePassesIsEnabled);
986 DoInstructionSelection();
987 }
988
989 ISEL_DUMP(dbgs() << "\nSelected selection DAG: "
990 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
991 << "'\n";
992 CurDAG->dump());
993
994 if (ViewSchedDAGs && MatchFilterBB)
995 CurDAG->viewGraph(Title: "scheduler input for " + BlockName);
996
997 // Schedule machine code.
998 ScheduleDAGSDNodes *Scheduler = CreateScheduler();
999 {
1000 NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
1001 GroupDescription, TimePassesIsEnabled);
1002 Scheduler->Run(dag: CurDAG, bb: FuncInfo->MBB);
1003 }
1004
1005 if (ViewSUnitDAGs && MatchFilterBB)
1006 Scheduler->viewGraph();
1007
1008 // Emit machine code to BB. This can change 'BB' to the last block being
1009 // inserted into.
1010 MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
1011 {
1012 NamedRegionTimer T("emit", "Instruction Creation", GroupName,
1013 GroupDescription, TimePassesIsEnabled);
1014
1015 // FuncInfo->InsertPt is passed by reference and set to the end of the
1016 // scheduled instructions.
1017 LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(InsertPos&: FuncInfo->InsertPt);
1018 }
1019
1020 // If the block was split, make sure we update any references that are used to
1021 // update PHI nodes later on.
1022 if (FirstMBB != LastMBB)
1023 SDB->UpdateSplitBlock(First: FirstMBB, Last: LastMBB);
1024
1025 // Free the scheduler state.
1026 {
1027 NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
1028 GroupDescription, TimePassesIsEnabled);
1029 delete Scheduler;
1030 }
1031
1032 // Free the SelectionDAG state, now that we're finished with it.
1033 CurDAG->clear();
1034}
1035
1036namespace {
1037
1038/// ISelUpdater - helper class to handle updates of the instruction selection
1039/// graph.
1040class ISelUpdater : public SelectionDAG::DAGUpdateListener {
1041 SelectionDAG::allnodes_iterator &ISelPosition;
1042
1043public:
1044 ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
1045 : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
1046
1047 /// NodeDeleted - Handle nodes deleted from the graph. If the node being
1048 /// deleted is the current ISelPosition node, update ISelPosition.
1049 ///
1050 void NodeDeleted(SDNode *N, SDNode *E) override {
1051 if (ISelPosition == SelectionDAG::allnodes_iterator(N))
1052 ++ISelPosition;
1053 }
1054
1055 /// NodeInserted - Handle new nodes inserted into the graph: propagate
1056 /// metadata from root nodes that also applies to new nodes, in case the root
1057 /// is later deleted.
1058 void NodeInserted(SDNode *N) override {
1059 SDNode *CurNode = &*ISelPosition;
1060 if (MDNode *MD = DAG.getPCSections(Node: CurNode))
1061 DAG.addPCSections(Node: N, MD);
1062 if (MDNode *MMRA = DAG.getMMRAMetadata(Node: CurNode))
1063 DAG.addMMRAMetadata(Node: N, MMRA);
1064 }
1065};
1066
1067} // end anonymous namespace
1068
// This function is used to enforce the topological node id property
// leveraged during instruction selection. Before the selection process all
// nodes are given a non-negative id such that all nodes have a greater id than
// their operands. As this holds transitively, we can prune checks that a node
// N is a predecessor of another node M by not recursively checking through M's
// operands if N's ID is larger than M's ID. This significantly improves
// performance of various legality checks (e.g. IsLegalToFold / UpdateChains).

// However, when we fuse multiple nodes into a single node during
// selection we may induce a predecessor relationship between inputs and
// outputs of distinct nodes being merged, violating the topological property.
// Should a fused node have a successor which has yet to be selected,
// our legality checks would be incorrect. To avoid this we mark all unselected
// successor nodes, i.e. id != -1, as invalid for pruning by bit-negating (x =>
// (-(x+1))) the ids and modify our pruning check to ignore negative Ids of M.
// We use bit-negation to more clearly enforce that node id -1 can only be
// achieved by selected nodes. As the conversion is reversible to the original
// Id, topological pruning can still be leveraged when looking for unselected
// nodes. This method is called internally in all ISel replacement-related
// functions.
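// For example, an unselected node with id 5 is invalidated to -(5 + 1) = -6,
// and getUninvalidatedNodeId() recovers the original id as -(-6 + 1) = 5.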
1089void SelectionDAGISel::EnforceNodeIdInvariant(SDNode *Node) {
1090 SmallVector<SDNode *, 4> Nodes;
1091 Nodes.push_back(Elt: Node);
1092
1093 while (!Nodes.empty()) {
1094 SDNode *N = Nodes.pop_back_val();
1095 for (auto *U : N->uses()) {
1096 auto UId = U->getNodeId();
1097 if (UId > 0) {
1098 InvalidateNodeId(N: U);
1099 Nodes.push_back(Elt: U);
1100 }
1101 }
1102 }
1103}
1104
1105// InvalidateNodeId - As explained in EnforceNodeIdInvariant, mark a
1106// NodeId with the equivalent node id which is invalid for topological
1107// pruning.
1108void SelectionDAGISel::InvalidateNodeId(SDNode *N) {
1109 int InvalidId = -(N->getNodeId() + 1);
1110 N->setNodeId(InvalidId);
1111}
1112
1113// getUninvalidatedNodeId - get original uninvalidated node id.
1114int SelectionDAGISel::getUninvalidatedNodeId(SDNode *N) {
1115 int Id = N->getNodeId();
1116 if (Id < -1)
1117 return -(Id + 1);
1118 return Id;
1119}
1120
1121void SelectionDAGISel::DoInstructionSelection() {
1122 LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
1123 << printMBBReference(*FuncInfo->MBB) << " '"
1124 << FuncInfo->MBB->getName() << "'\n");
1125
1126 PreprocessISelDAG();
1127
1128 // Select target instructions for the DAG.
1129 {
1130 // Number all nodes with a topological order and set DAGSize.
1131 DAGSize = CurDAG->AssignTopologicalOrder();
1132
1133 // Create a dummy node (which is not added to allnodes), that adds
1134 // a reference to the root node, preventing it from being deleted,
1135 // and tracking any changes of the root.
1136 HandleSDNode Dummy(CurDAG->getRoot());
1137 SelectionDAG::allnodes_iterator ISelPosition (CurDAG->getRoot().getNode());
1138 ++ISelPosition;
1139
1140 // Make sure that ISelPosition gets properly updated when nodes are deleted
1141 // in calls made from this function. New nodes inherit relevant metadata.
1142 ISelUpdater ISU(*CurDAG, ISelPosition);
1143
    // The AllNodes list is now topologically sorted. Visit the
    // nodes by starting at the end of the list (the root of the
    // graph) and proceeding back toward the beginning (the entry
    // node).
1148 while (ISelPosition != CurDAG->allnodes_begin()) {
1149 SDNode *Node = &*--ISelPosition;
1150 // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
1151 // but there are currently some corner cases that it misses. Also, this
1152 // makes it theoretically possible to disable the DAGCombiner.
1153 if (Node->use_empty())
1154 continue;
1155
1156#ifndef NDEBUG
1157 SmallVector<SDNode *, 4> Nodes;
1158 Nodes.push_back(Elt: Node);
1159
1160 while (!Nodes.empty()) {
1161 auto N = Nodes.pop_back_val();
1162 if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
1163 continue;
1164 for (const SDValue &Op : N->op_values()) {
1165 if (Op->getOpcode() == ISD::TokenFactor)
1166 Nodes.push_back(Elt: Op.getNode());
1167 else {
            // We rely on topological ordering of node ids for checking for
            // cycles when fusing nodes during selection. All unselected
            // successors of an already selected node should have a negative
            // id. This assertion will catch such cases. If this assertion
            // triggers, it is likely you are using DAG-level Value/Node
            // replacement functions (versus equivalent ISEL replacement) in
            // backend-specific selections. See the comment in
            // EnforceNodeIdInvariant for more details.
1176 assert(Op->getNodeId() != -1 &&
1177 "Node has already selected predecessor node");
1178 }
1179 }
1180 }
1181#endif
1182
      // When we are using non-default rounding modes or FP exception behavior,
1184 // FP operations are represented by StrictFP pseudo-operations. For
1185 // targets that do not (yet) understand strict FP operations directly,
1186 // we convert them to normal FP opcodes instead at this point. This
1187 // will allow them to be handled by existing target-specific instruction
1188 // selectors.
1189 if (!TLI->isStrictFPEnabled() && Node->isStrictFPOpcode()) {
1190 // For some opcodes, we need to call TLI->getOperationAction using
1191 // the first operand type instead of the result type. Note that this
1192 // must match what SelectionDAGLegalize::LegalizeOp is doing.
1193 EVT ActionVT;
1194 switch (Node->getOpcode()) {
1195 case ISD::STRICT_SINT_TO_FP:
1196 case ISD::STRICT_UINT_TO_FP:
1197 case ISD::STRICT_LRINT:
1198 case ISD::STRICT_LLRINT:
1199 case ISD::STRICT_LROUND:
1200 case ISD::STRICT_LLROUND:
1201 case ISD::STRICT_FSETCC:
1202 case ISD::STRICT_FSETCCS:
1203 ActionVT = Node->getOperand(Num: 1).getValueType();
1204 break;
1205 default:
1206 ActionVT = Node->getValueType(ResNo: 0);
1207 break;
1208 }
1209 if (TLI->getOperationAction(Op: Node->getOpcode(), VT: ActionVT)
1210 == TargetLowering::Expand)
1211 Node = CurDAG->mutateStrictFPToFP(Node);
1212 }
1213
1214 LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
1215 Node->dump(CurDAG));
1216
1217 Select(N: Node);
1218 }
1219
1220 CurDAG->setRoot(Dummy.getValue());
1221 }
1222
1223 LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
1224
1225 PostprocessISelDAG();
1226}
1227
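// Return true if any user of the catchpad reads the exception pointer or
// exception code via the eh.exceptionpointer or eh.exceptioncode intrinsics.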
1228static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
1229 for (const User *U : CPI->users()) {
1230 if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(Val: U)) {
1231 Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
1232 if (IID == Intrinsic::eh_exceptionpointer ||
1233 IID == Intrinsic::eh_exceptioncode)
1234 return true;
1235 }
1236 }
1237 return false;
1238}
1239
1240// wasm.landingpad.index intrinsic is for associating a landing pad index number
1241// with a catchpad instruction. Retrieve the landing pad index in the intrinsic
1242// and store the mapping in the function.
1243static void mapWasmLandingPadIndex(MachineBasicBlock *MBB,
1244 const CatchPadInst *CPI) {
1245 MachineFunction *MF = MBB->getParent();
1246 // In case of single catch (...), we don't emit LSDA, so we don't need
1247 // this information.
1248 bool IsSingleCatchAllClause =
1249 CPI->arg_size() == 1 &&
1250 cast<Constant>(Val: CPI->getArgOperand(i: 0))->isNullValue();
  // Catchpads for longjmp use an empty type list, e.g. catchpad within %0 [],
  // and they don't need LSDA info.
1253 bool IsCatchLongjmp = CPI->arg_size() == 0;
1254 if (!IsSingleCatchAllClause && !IsCatchLongjmp) {
1255 // Create a mapping from landing pad label to landing pad index.
1256 bool IntrFound = false;
1257 for (const User *U : CPI->users()) {
1258 if (const auto *Call = dyn_cast<IntrinsicInst>(Val: U)) {
1259 Intrinsic::ID IID = Call->getIntrinsicID();
1260 if (IID == Intrinsic::wasm_landingpad_index) {
1261 Value *IndexArg = Call->getArgOperand(i: 1);
1262 int Index = cast<ConstantInt>(Val: IndexArg)->getZExtValue();
1263 MF->setWasmLandingPadIndex(LPad: MBB, Index);
1264 IntrFound = true;
1265 break;
1266 }
1267 }
1268 }
1269 assert(IntrFound && "wasm.landingpad.index intrinsic not found!");
1270 (void)IntrFound;
1271 }
1272}
1273
1274/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
1275/// do other setup for EH landing-pad blocks.
1276bool SelectionDAGISel::PrepareEHLandingPad() {
1277 MachineBasicBlock *MBB = FuncInfo->MBB;
1278 const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
1279 const BasicBlock *LLVMBB = MBB->getBasicBlock();
1280 const TargetRegisterClass *PtrRC =
1281 TLI->getRegClassFor(VT: TLI->getPointerTy(DL: CurDAG->getDataLayout()));
1282
1283 auto Pers = classifyEHPersonality(Pers: PersonalityFn);
1284
1285 // Catchpads have one live-in register, which typically holds the exception
1286 // pointer or code.
1287 if (isFuncletEHPersonality(Pers)) {
1288 if (const auto *CPI = dyn_cast<CatchPadInst>(Val: LLVMBB->getFirstNonPHI())) {
1289 if (hasExceptionPointerOrCodeUser(CPI)) {
1290 // Get or create the virtual register to hold the pointer or code. Mark
1291 // the live in physreg and copy into the vreg.
1292 MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
1293 assert(EHPhysReg && "target lacks exception pointer register");
1294 MBB->addLiveIn(PhysReg: EHPhysReg);
1295 unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, RC: PtrRC);
1296 BuildMI(BB&: *MBB, I: FuncInfo->InsertPt, MIMD: SDB->getCurDebugLoc(),
1297 MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: VReg)
1298 .addReg(RegNo: EHPhysReg, flags: RegState::Kill);
1299 }
1300 }
1301 return true;
1302 }
1303
1304 // Add a label to mark the beginning of the landing pad. Deletion of the
1305 // landing pad can thus be detected via the MachineModuleInfo.
1306 MCSymbol *Label = MF->addLandingPad(LandingPad: MBB);
1307
1308 const MCInstrDesc &II = TII->get(Opcode: TargetOpcode::EH_LABEL);
1309 BuildMI(BB&: *MBB, I: FuncInfo->InsertPt, MIMD: SDB->getCurDebugLoc(), MCID: II)
1310 .addSym(Sym: Label);
1311
1312 // If the unwinder does not preserve all registers, ensure that the
1313 // function marks the clobbered registers as used.
1314 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
1315 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(MF: *MF))
1316 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
1317
1318 if (Pers == EHPersonality::Wasm_CXX) {
1319 if (const auto *CPI = dyn_cast<CatchPadInst>(Val: LLVMBB->getFirstNonPHI()))
1320 mapWasmLandingPadIndex(MBB, CPI);
1321 } else {
1322 // Assign the call site to the landing pad's begin label.
1323 MF->setCallSiteLandingPad(Sym: Label, Sites: SDB->LPadToCallSiteMap[MBB]);
1324 // Mark exception register as live in.
1325 if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
1326 FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(PhysReg: Reg, RC: PtrRC);
1327 // Mark exception selector register as live in.
1328 if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
1329 FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(PhysReg: Reg, RC: PtrRC);
1330 }
1331
1332 return true;
1333}
1334
1335// Mark and Report IPToState for each Block under IsEHa
1336void SelectionDAGISel::reportIPToStateForBlocks(MachineFunction *MF) {
1337 MachineModuleInfo &MMI = MF->getMMI();
1338 llvm::WinEHFuncInfo *EHInfo = MF->getWinEHFuncInfo();
1339 if (!EHInfo)
1340 return;
1341 for (auto MBBI = MF->begin(), E = MF->end(); MBBI != E; ++MBBI) {
1342 MachineBasicBlock *MBB = &*MBBI;
1343 const BasicBlock *BB = MBB->getBasicBlock();
1344 int State = EHInfo->BlockToStateMap[BB];
1345 if (BB->getFirstMayFaultInst()) {
      // Report the IP range only for blocks with a faulting instruction.
1347 auto MBBb = MBB->getFirstNonPHI();
1348 MachineInstr *MIb = &*MBBb;
1349 if (MIb->isTerminator())
1350 continue;
1351
1352 // Insert EH Labels
1353 MCSymbol *BeginLabel = MMI.getContext().createTempSymbol();
1354 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
1355 EHInfo->addIPToStateRange(State, InvokeBegin: BeginLabel, InvokeEnd: EndLabel);
1356 BuildMI(BB&: *MBB, I: MBBb, MIMD: SDB->getCurDebugLoc(),
1357 MCID: TII->get(Opcode: TargetOpcode::EH_LABEL))
1358 .addSym(Sym: BeginLabel);
1359 auto MBBe = MBB->instr_end();
1360 MachineInstr *MIe = &*(--MBBe);
      // Insert before (possibly multiple) terminators.
1362 while (MIe->isTerminator())
1363 MIe = &*(--MBBe);
1364 ++MBBe;
1365 BuildMI(BB&: *MBB, I: MBBe, MIMD: SDB->getCurDebugLoc(),
1366 MCID: TII->get(Opcode: TargetOpcode::EH_LABEL))
1367 .addSym(Sym: EndLabel);
1368 }
1369 }
1370}
1371
1372/// isFoldedOrDeadInstruction - Return true if the specified instruction is
1373/// side-effect free and is either dead or folded into a generated instruction.
1374/// Return false if it needs to be emitted.
1375static bool isFoldedOrDeadInstruction(const Instruction *I,
1376 const FunctionLoweringInfo &FuncInfo) {
1377 return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1378 !I->isTerminator() && // Terminators aren't folded.
1379 !isa<DbgInfoIntrinsic>(Val: I) && // Debug instructions aren't folded.
1380 !I->isEHPad() && // EH pad instructions aren't folded.
1381 !FuncInfo.isExportedInst(V: I); // Exported instrs must be computed.
1382}
1383
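// If the declare's expression is an entry value and its address is a function
// argument, record the variable directly against the live-in physical register
// that carries that argument. Returns true if such an entry was recorded.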
1384static bool processIfEntryValueDbgDeclare(FunctionLoweringInfo &FuncInfo,
1385 const Value *Arg, DIExpression *Expr,
1386 DILocalVariable *Var,
1387 DebugLoc DbgLoc) {
1388 if (!Expr->isEntryValue() || !isa<Argument>(Val: Arg))
1389 return false;
1390
1391 auto ArgIt = FuncInfo.ValueMap.find(Val: Arg);
1392 if (ArgIt == FuncInfo.ValueMap.end())
1393 return false;
1394 Register ArgVReg = ArgIt->getSecond();
1395
1396 // Find the corresponding livein physical register to this argument.
1397 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
1398 if (VirtReg == ArgVReg) {
1399 // Append an op deref to account for the fact that this is a dbg_declare.
1400 Expr = DIExpression::append(Expr, Ops: dwarf::DW_OP_deref);
1401 FuncInfo.MF->setVariableDbgInfo(Var, Expr, Reg: PhysReg, Loc: DbgLoc);
1402 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1403 << ", Expr=" << *Expr << ", MCRegister=" << PhysReg
1404 << ", DbgLoc=" << DbgLoc << "\n");
1405 return true;
1406 }
1407 return false;
1408}
1409
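// Lower a single dbg.declare to an entry in the MachineFunction's variable
// debug info: either an entry-value register location (see above) or a frame
// index when the address is a static alloca or an argument passed in memory.
// Returns true if the declare was handled here; otherwise it is left to be
// handled during ISel like a dbg.value.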
1410static bool processDbgDeclare(FunctionLoweringInfo &FuncInfo,
1411 const Value *Address, DIExpression *Expr,
1412 DILocalVariable *Var, DebugLoc DbgLoc) {
1413 if (!Address) {
1414 LLVM_DEBUG(dbgs() << "processDbgDeclares skipping " << *Var
1415 << " (bad address)\n");
1416 return false;
1417 }
1418
1419 if (processIfEntryValueDbgDeclare(FuncInfo, Arg: Address, Expr, Var, DbgLoc))
1420 return true;
1421
1422 MachineFunction *MF = FuncInfo.MF;
1423 const DataLayout &DL = MF->getDataLayout();
1424
1425 assert(Var && "Missing variable");
1426 assert(DbgLoc && "Missing location");
1427
1428 // Look through casts and constant offset GEPs. These mostly come from
1429 // inalloca.
1430 APInt Offset(DL.getTypeSizeInBits(Ty: Address->getType()), 0);
1431 Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1432
1433 // Check if the variable is a static alloca or a byval or inalloca
1434 // argument passed in memory. If it is not, then we will ignore this
1435 // intrinsic and handle this during isel like dbg.value.
1436 int FI = std::numeric_limits<int>::max();
1437 if (const auto *AI = dyn_cast<AllocaInst>(Val: Address)) {
1438 auto SI = FuncInfo.StaticAllocaMap.find(Val: AI);
1439 if (SI != FuncInfo.StaticAllocaMap.end())
1440 FI = SI->second;
1441 } else if (const auto *Arg = dyn_cast<Argument>(Val: Address))
1442 FI = FuncInfo.getArgumentFrameIndex(A: Arg);
1443
1444 if (FI == std::numeric_limits<int>::max())
1445 return false;
1446
1447 if (Offset.getBoolValue())
1448 Expr = DIExpression::prepend(Expr, Flags: DIExpression::ApplyOffset,
1449 Offset: Offset.getZExtValue());
1450
1451 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1452 << ", Expr=" << *Expr << ", FI=" << FI
1453 << ", DbgLoc=" << DbgLoc << "\n");
1454 MF->setVariableDbgInfo(Var, Expr, Slot: FI, Loc: DbgLoc);
1455 return true;
1456}
1457
1458/// Collect llvm.dbg.declare information. This is done after argument lowering
1459/// in case the declarations refer to arguments.
1460static void processDbgDeclares(FunctionLoweringInfo &FuncInfo) {
1461 for (const auto &I : instructions(F: *FuncInfo.Fn)) {
1462 const auto *DI = dyn_cast<DbgDeclareInst>(Val: &I);
1463 if (DI && processDbgDeclare(FuncInfo, Address: DI->getAddress(), Expr: DI->getExpression(),
1464 Var: DI->getVariable(), DbgLoc: DI->getDebugLoc()))
1465 FuncInfo.PreprocessedDbgDeclares.insert(Ptr: DI);
1466 for (const DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) {
1467 if (DVR.Type == DbgVariableRecord::LocationType::Declare &&
1468 processDbgDeclare(FuncInfo, Address: DVR.getVariableLocationOp(OpIdx: 0),
1469 Expr: DVR.getExpression(), Var: DVR.getVariable(),
1470 DbgLoc: DVR.getDebugLoc()))
1471 FuncInfo.PreprocessedDVRDeclares.insert(Ptr: &DVR);
1472 }
1473 }
1474}
1475
1476/// Collect single location variable information generated with assignment
1477/// tracking. This is done after argument lowering in case the declarations
1478/// refer to arguments.
1479static void processSingleLocVars(FunctionLoweringInfo &FuncInfo,
1480 FunctionVarLocs const *FnVarLocs) {
1481 for (auto It = FnVarLocs->single_locs_begin(),
1482 End = FnVarLocs->single_locs_end();
1483 It != End; ++It) {
1484 assert(!It->Values.hasArgList() && "Single loc variadic ops not supported");
1485 processDbgDeclare(FuncInfo, Address: It->Values.getVariableLocationOp(OpIdx: 0), Expr: It->Expr,
1486 Var: FnVarLocs->getDILocalVariable(ID: It->VariableID), DbgLoc: It->DL);
1487 }
1488}
1489
1490void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1491 FastISelFailed = false;
1492 // Initialize the Fast-ISel state, if needed.
1493 FastISel *FastIS = nullptr;
1494 if (TM.Options.EnableFastISel) {
1495 LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
1496 FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1497 }
1498
1499 ReversePostOrderTraversal<const Function*> RPOT(&Fn);
1500
1501 // Lower arguments up front. An RPO iteration always visits the entry block
1502 // first.
1503 assert(*RPOT.begin() == &Fn.getEntryBlock());
1504 ++NumEntryBlocks;
1505
1506 // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1507 FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
1508 FuncInfo->InsertPt = FuncInfo->MBB->begin();
1509
1510 CurDAG->setFunctionLoweringInfo(FuncInfo.get());
1511
1512 if (!FastIS) {
1513 LowerArguments(F: Fn);
1514 } else {
1515 // See if fast isel can lower the arguments.
1516 FastIS->startNewBlock();
1517 if (!FastIS->lowerArguments()) {
1518 FastISelFailed = true;
1519 // Fast isel failed to lower these arguments
1520 ++NumFastIselFailLowerArguments;
1521
1522 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1523 Fn.getSubprogram(),
1524 &Fn.getEntryBlock());
1525 R << "FastISel didn't lower all arguments: "
1526 << ore::NV("Prototype", Fn.getFunctionType());
1527 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort: EnableFastISelAbort > 1);
1528
1529 // Use SelectionDAG argument lowering
1530 LowerArguments(F: Fn);
1531 CurDAG->setRoot(SDB->getControlRoot());
1532 SDB->clear();
1533 CodeGenAndEmitDAG();
1534 }
1535
1536 // If we inserted any instructions at the beginning, make a note of
1537 // where they are, so we can be sure to emit subsequent instructions
1538 // after them.
1539 if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1540 FastIS->setLastLocalValue(&*std::prev(x: FuncInfo->InsertPt));
1541 else
1542 FastIS->setLastLocalValue(nullptr);
1543 }
1544
1545 bool Inserted = SwiftError->createEntriesInEntryBlock(DbgLoc: SDB->getCurDebugLoc());
1546
1547 if (FastIS && Inserted)
1548 FastIS->setLastLocalValue(&*std::prev(x: FuncInfo->InsertPt));
1549
1550 if (isAssignmentTrackingEnabled(M: *Fn.getParent())) {
1551 assert(CurDAG->getFunctionVarLocs() &&
1552 "expected AssignmentTrackingAnalysis pass results");
1553 processSingleLocVars(FuncInfo&: *FuncInfo, FnVarLocs: CurDAG->getFunctionVarLocs());
1554 } else {
1555 processDbgDeclares(FuncInfo&: *FuncInfo);
1556 }
1557
1558 // Iterate over all basic blocks in the function.
1559 StackProtector &SP = getAnalysis<StackProtector>();
1560 for (const BasicBlock *LLVMBB : RPOT) {
1561 if (OptLevel != CodeGenOptLevel::None) {
1562 bool AllPredsVisited = true;
1563 for (const BasicBlock *Pred : predecessors(BB: LLVMBB)) {
1564 if (!FuncInfo->VisitedBBs.count(Ptr: Pred)) {
1565 AllPredsVisited = false;
1566 break;
1567 }
1568 }
1569
1570 if (AllPredsVisited) {
1571 for (const PHINode &PN : LLVMBB->phis())
1572 FuncInfo->ComputePHILiveOutRegInfo(&PN);
1573 } else {
1574 for (const PHINode &PN : LLVMBB->phis())
1575 FuncInfo->InvalidatePHILiveOutRegInfo(PN: &PN);
1576 }
1577
1578 FuncInfo->VisitedBBs.insert(Ptr: LLVMBB);
1579 }
1580
1581 BasicBlock::const_iterator const Begin =
1582 LLVMBB->getFirstNonPHI()->getIterator();
1583 BasicBlock::const_iterator const End = LLVMBB->end();
1584 BasicBlock::const_iterator BI = End;
1585
1586 FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1587 if (!FuncInfo->MBB)
1588 continue; // Some blocks like catchpads have no code or MBB.
1589
1590 // Insert new instructions after any phi or argument setup code.
1591 FuncInfo->InsertPt = FuncInfo->MBB->end();
1592
1593 // Setup an EH landing-pad block.
1594 FuncInfo->ExceptionPointerVirtReg = 0;
1595 FuncInfo->ExceptionSelectorVirtReg = 0;
1596 if (LLVMBB->isEHPad())
1597 if (!PrepareEHLandingPad())
1598 continue;
1599
1600 // Before doing SelectionDAG ISel, see if FastISel has been requested.
1601 if (FastIS) {
1602 if (LLVMBB != &Fn.getEntryBlock())
1603 FastIS->startNewBlock();
1604
1605 unsigned NumFastIselRemaining = std::distance(first: Begin, last: End);
1606
1607 // Pre-assign swifterror vregs.
1608 SwiftError->preassignVRegs(MBB: FuncInfo->MBB, Begin, End);
1609
1610 // Do FastISel on as many instructions as possible.
1611 for (; BI != Begin; --BI) {
1612 const Instruction *Inst = &*std::prev(x: BI);
1613
1614 // If we no longer require this instruction, skip it.
1615 if (isFoldedOrDeadInstruction(I: Inst, FuncInfo: *FuncInfo) ||
1616 ElidedArgCopyInstrs.count(Ptr: Inst)) {
1617 --NumFastIselRemaining;
1618 FastIS->handleDbgInfo(II: Inst);
1619 continue;
1620 }
1621
1622 // Bottom-up: reset the insert pos at the top, after any local-value
1623 // instructions.
1624 FastIS->recomputeInsertPt();
1625
1626 // Try to select the instruction with FastISel.
1627 if (FastIS->selectInstruction(I: Inst)) {
1628 --NumFastIselRemaining;
1629 ++NumFastIselSuccess;
1630
1631 FastIS->handleDbgInfo(II: Inst);
1632 // If fast isel succeeded, skip over all the folded instructions, and
1633 // then see if there is a load right before the selected instructions.
1634 // Try to fold the load if so.
1635 const Instruction *BeforeInst = Inst;
1636 while (BeforeInst != &*Begin) {
1637 BeforeInst = &*std::prev(x: BasicBlock::const_iterator(BeforeInst));
1638 if (!isFoldedOrDeadInstruction(I: BeforeInst, FuncInfo: *FuncInfo))
1639 break;
1640 }
1641 if (BeforeInst != Inst && isa<LoadInst>(Val: BeforeInst) &&
1642 BeforeInst->hasOneUse() &&
1643 FastIS->tryToFoldLoad(LI: cast<LoadInst>(Val: BeforeInst), FoldInst: Inst)) {
1644 // If we succeeded, don't re-select the load.
1645 LLVM_DEBUG(dbgs()
1646 << "FastISel folded load: " << *BeforeInst << "\n");
1647 FastIS->handleDbgInfo(II: BeforeInst);
1648 BI = std::next(x: BasicBlock::const_iterator(BeforeInst));
1649 --NumFastIselRemaining;
1650 ++NumFastIselSuccess;
1651 }
1652 continue;
1653 }
1654
1655 FastISelFailed = true;
1656
1657 // Then handle certain instructions as single-LLVM-Instruction blocks.
1658 // We cannot separate out GCrelocates to their own blocks since we need
1659 // to keep track of gc-relocates for a particular gc-statepoint. This is
1660 // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1661 // visitGCRelocate.
1662 if (isa<CallInst>(Val: Inst) && !isa<GCStatepointInst>(Val: Inst) &&
1663 !isa<GCRelocateInst>(Val: Inst) && !isa<GCResultInst>(Val: Inst)) {
1664 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1665 Inst->getDebugLoc(), LLVMBB);
1666
1667 R << "FastISel missed call";
1668
1669 if (R.isEnabled() || EnableFastISelAbort) {
1670 std::string InstStrStorage;
1671 raw_string_ostream InstStr(InstStrStorage);
1672 InstStr << *Inst;
1673
1674 R << ": " << InstStr.str();
1675 }
1676
1677 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort: EnableFastISelAbort > 2);
1678
1679 if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1680 !Inst->use_empty()) {
1681 Register &R = FuncInfo->ValueMap[Inst];
1682 if (!R)
1683 R = FuncInfo->CreateRegs(V: Inst);
1684 }
1685
1686 bool HadTailCall = false;
1687 MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
1688 SelectBasicBlock(Begin: Inst->getIterator(), End: BI, HadTailCall);
1689
1690 // If the call was emitted as a tail call, we're done with the block.
1691 // We also need to delete any previously emitted instructions.
1692 if (HadTailCall) {
1693 FastIS->removeDeadCode(I: SavedInsertPt, E: FuncInfo->MBB->end());
1694 --BI;
1695 break;
1696 }
1697
1698 // Recompute NumFastIselRemaining as Selection DAG instruction
1699 // selection may have handled the call, input args, etc.
1700 unsigned RemainingNow = std::distance(first: Begin, last: BI);
1701 NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1702 NumFastIselRemaining = RemainingNow;
1703 continue;
1704 }
1705
1706 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1707 Inst->getDebugLoc(), LLVMBB);
1708
1709 bool ShouldAbort = EnableFastISelAbort;
1710 if (Inst->isTerminator()) {
1711 // Use a different message for terminator misses.
1712 R << "FastISel missed terminator";
1713 // Don't abort for terminators unless the abort level is really high.
1714 ShouldAbort = (EnableFastISelAbort > 2);
1715 } else {
1716 R << "FastISel missed";
1717 }
1718
1719 if (R.isEnabled() || EnableFastISelAbort) {
1720 std::string InstStrStorage;
1721 raw_string_ostream InstStr(InstStrStorage);
1722 InstStr << *Inst;
1723 R << ": " << InstStr.str();
1724 }
1725
1726 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort);
1727
1728 NumFastIselFailures += NumFastIselRemaining;
1729 break;
1730 }
1731
1732 FastIS->recomputeInsertPt();
1733 }
1734
1735 if (SP.shouldEmitSDCheck(BB: *LLVMBB)) {
1736 bool FunctionBasedInstrumentation =
1737 TLI->getSSPStackGuardCheck(M: *Fn.getParent());
1738 SDB->SPDescriptor.initialize(BB: LLVMBB, MBB: FuncInfo->MBBMap[LLVMBB],
1739 FunctionBasedInstrumentation);
1740 }
1741
1742 if (Begin != BI)
1743 ++NumDAGBlocks;
1744 else
1745 ++NumFastIselBlocks;
1746
1747 if (Begin != BI) {
1748 // Run SelectionDAG instruction selection on the remainder of the block
1749 // not handled by FastISel. If FastISel is not run, this is the entire
1750 // block.
1751 bool HadTailCall;
1752 SelectBasicBlock(Begin, End: BI, HadTailCall);
1753
1754 // But if FastISel was run, we already selected some of the block.
1755 // If we emitted a tail-call, we need to delete any previously emitted
1756 // instruction that follows it.
1757 if (FastIS && HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1758 FastIS->removeDeadCode(I: FuncInfo->InsertPt, E: FuncInfo->MBB->end());
1759 }
1760
1761 if (FastIS)
1762 FastIS->finishBasicBlock();
1763 FinishBasicBlock();
1764 FuncInfo->PHINodesToUpdate.clear();
1765 ElidedArgCopyInstrs.clear();
1766 }
1767
1768 // AsynchEH: Report Block State under -AsynchEH
1769 if (Fn.getParent()->getModuleFlag(Key: "eh-asynch"))
1770 reportIPToStateForBlocks(MF);
1771
1772 SP.copyToMachineFrameInfo(MFI&: MF->getFrameInfo());
1773
1774 SwiftError->propagateVRegs();
1775
1776 delete FastIS;
1777 SDB->clearDanglingDebugInfo();
1778 SDB->SPDescriptor.resetPerFunctionState();
1779}
1780
1781void
1782SelectionDAGISel::FinishBasicBlock() {
1783 LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
1784 << FuncInfo->PHINodesToUpdate.size() << "\n";
1785 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
1786 ++i) dbgs()
1787 << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
1788 << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1789
1790 // Now that we know the last MBB the LLVM BB expanded into, update PHI nodes
1791 // in successors.
1792 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1793 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1794 assert(PHI->isPHI() &&
1795 "This is not a machine PHI node that we are updating!");
1796 if (!FuncInfo->MBB->isSuccessor(MBB: PHI->getParent()))
1797 continue;
1798 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[i].second).addMBB(MBB: FuncInfo->MBB);
1799 }
1800
1801 // Handle stack protector.
1802 if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1803 // The target provides a guard check function. There is no need to
1804 // generate error handling code or to split the current basic block.
1805 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1806
1807 // Add load and check to the basicblock.
1808 FuncInfo->MBB = ParentMBB;
1809 FuncInfo->InsertPt =
1810 findSplitPointForStackProtector(BB: ParentMBB, TII: *TII);
1811 SDB->visitSPDescriptorParent(SPD&: SDB->SPDescriptor, ParentBB: ParentMBB);
1812 CurDAG->setRoot(SDB->getRoot());
1813 SDB->clear();
1814 CodeGenAndEmitDAG();
1815
1816 // Clear the Per-BB State.
1817 SDB->SPDescriptor.resetPerBBState();
1818 } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1819 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1820 MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1821
1822 // Find the split point to split the parent mbb. At the same time copy all
1823 // physical registers used in the tail of parent mbb into virtual registers
1824 // before the split point and back into physical registers after the split
1825 // point. This prevents us needing to deal with Live-ins and many other
1826 // register allocation issues caused by us splitting the parent mbb. The
1827 // register allocator will clean up said virtual copies later on.
1828 MachineBasicBlock::iterator SplitPoint =
1829 findSplitPointForStackProtector(BB: ParentMBB, TII: *TII);
1830
1831 // Splice the terminator of ParentMBB into SuccessMBB.
1832 SuccessMBB->splice(Where: SuccessMBB->end(), Other: ParentMBB,
1833 From: SplitPoint,
1834 To: ParentMBB->end());
1835
1836 // Add the compare, jump-on-not-equal, and jump to the parent BB.
1837 FuncInfo->MBB = ParentMBB;
1838 FuncInfo->InsertPt = ParentMBB->end();
1839 SDB->visitSPDescriptorParent(SPD&: SDB->SPDescriptor, ParentBB: ParentMBB);
1840 CurDAG->setRoot(SDB->getRoot());
1841 SDB->clear();
1842 CodeGenAndEmitDAG();
1843
1844 // CodeGen Failure MBB if we have not codegened it yet.
1845 MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1846 if (FailureMBB->empty()) {
1847 FuncInfo->MBB = FailureMBB;
1848 FuncInfo->InsertPt = FailureMBB->end();
1849 SDB->visitSPDescriptorFailure(SPD&: SDB->SPDescriptor);
1850 CurDAG->setRoot(SDB->getRoot());
1851 SDB->clear();
1852 CodeGenAndEmitDAG();
1853 }
1854
1855 // Clear the Per-BB State.
1856 SDB->SPDescriptor.resetPerBBState();
1857 }
1858
1859 // Lower each BitTestBlock.
1860 for (auto &BTB : SDB->SL->BitTestCases) {
1861 // Lower header first, if it wasn't already lowered
1862 if (!BTB.Emitted) {
1863 // Set the current basic block to the mbb we wish to insert the code into
1864 FuncInfo->MBB = BTB.Parent;
1865 FuncInfo->InsertPt = FuncInfo->MBB->end();
1866 // Emit the code
1867 SDB->visitBitTestHeader(B&: BTB, SwitchBB: FuncInfo->MBB);
1868 CurDAG->setRoot(SDB->getRoot());
1869 SDB->clear();
1870 CodeGenAndEmitDAG();
1871 }
1872
1873 BranchProbability UnhandledProb = BTB.Prob;
1874 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
1875 UnhandledProb -= BTB.Cases[j].ExtraProb;
1876 // Set the current basic block to the mbb we wish to insert the code into
1877 FuncInfo->MBB = BTB.Cases[j].ThisBB;
1878 FuncInfo->InsertPt = FuncInfo->MBB->end();
1879 // Emit the code
1880
1881 // If all cases cover a contiguous range, it is not necessary to jump to
1882 // the default block after the last bit test fails. This is because the
1883 // range check during bit test header creation has guaranteed that every
1884 // case here doesn't go outside the range. In this case, there is no need
1885 // to perform the last bit test, as it will always be true. Instead, make
1886 // the second-to-last bit-test fall through to the target of the last bit
1887 // test, and delete the last bit test.
1888
1889 MachineBasicBlock *NextMBB;
1890 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
1891 // Second-to-last bit-test with contiguous range or omitted range
1892 // check: fall through to the target of the final bit test.
1893 NextMBB = BTB.Cases[j + 1].TargetBB;
1894 } else if (j + 1 == ej) {
1895 // For the last bit test, fall through to Default.
1896 NextMBB = BTB.Default;
1897 } else {
1898 // Otherwise, fall through to the next bit test.
1899 NextMBB = BTB.Cases[j + 1].ThisBB;
1900 }
1901
1902 SDB->visitBitTestCase(BB&: BTB, NextMBB, BranchProbToNext: UnhandledProb, Reg: BTB.Reg, B&: BTB.Cases[j],
1903 SwitchBB: FuncInfo->MBB);
1904
1905 CurDAG->setRoot(SDB->getRoot());
1906 SDB->clear();
1907 CodeGenAndEmitDAG();
1908
1909 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
1910 // Since we're not going to use the final bit test, remove it.
1911 BTB.Cases.pop_back();
1912 break;
1913 }
1914 }
1915
1916 // Update PHI Nodes
1917 for (const std::pair<MachineInstr *, unsigned> &P :
1918 FuncInfo->PHINodesToUpdate) {
1919 MachineInstrBuilder PHI(*MF, P.first);
1920 MachineBasicBlock *PHIBB = PHI->getParent();
1921 assert(PHI->isPHI() &&
1922 "This is not a machine PHI node that we are updating!");
1923 // This is the "default" BB. There are two jumps to it: one from the "header"
1924 // BB and one from the last "case" BB, unless the latter was skipped.
1925 if (PHIBB == BTB.Default) {
1926 PHI.addReg(RegNo: P.second).addMBB(MBB: BTB.Parent);
1927 if (!BTB.ContiguousRange) {
1928 PHI.addReg(RegNo: P.second).addMBB(MBB: BTB.Cases.back().ThisBB);
1929 }
1930 }
1931 // One of "cases" BB.
1932 for (const SwitchCG::BitTestCase &BT : BTB.Cases) {
1933 MachineBasicBlock* cBB = BT.ThisBB;
1934 if (cBB->isSuccessor(MBB: PHIBB))
1935 PHI.addReg(RegNo: P.second).addMBB(MBB: cBB);
1936 }
1937 }
1938 }
1939 SDB->SL->BitTestCases.clear();
1940
1941 // If the JumpTable record is filled in, then we need to emit a jump table.
1942 // Updating the PHI nodes is tricky in this case, since we need to determine
1943 // whether the PHI is a successor of the range check MBB or the jump table MBB.
1944 for (unsigned i = 0, e = SDB->SL->JTCases.size(); i != e; ++i) {
1945 // Lower header first, if it wasn't already lowered
1946 if (!SDB->SL->JTCases[i].first.Emitted) {
1947 // Set the current basic block to the mbb we wish to insert the code into
1948 FuncInfo->MBB = SDB->SL->JTCases[i].first.HeaderBB;
1949 FuncInfo->InsertPt = FuncInfo->MBB->end();
1950 // Emit the code
1951 SDB->visitJumpTableHeader(JT&: SDB->SL->JTCases[i].second,
1952 JTH&: SDB->SL->JTCases[i].first, SwitchBB: FuncInfo->MBB);
1953 CurDAG->setRoot(SDB->getRoot());
1954 SDB->clear();
1955 CodeGenAndEmitDAG();
1956 }
1957
1958 // Set the current basic block to the mbb we wish to insert the code into
1959 FuncInfo->MBB = SDB->SL->JTCases[i].second.MBB;
1960 FuncInfo->InsertPt = FuncInfo->MBB->end();
1961 // Emit the code
1962 SDB->visitJumpTable(JT&: SDB->SL->JTCases[i].second);
1963 CurDAG->setRoot(SDB->getRoot());
1964 SDB->clear();
1965 CodeGenAndEmitDAG();
1966
1967 // Update PHI Nodes
1968 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
1969 pi != pe; ++pi) {
1970 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
1971 MachineBasicBlock *PHIBB = PHI->getParent();
1972 assert(PHI->isPHI() &&
1973 "This is not a machine PHI node that we are updating!");
1974 // "default" BB. We can go there only from header BB.
1975 if (PHIBB == SDB->SL->JTCases[i].second.Default)
1976 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pi].second)
1977 .addMBB(MBB: SDB->SL->JTCases[i].first.HeaderBB);
1978 // JT BB. Just iterate over successors here
1979 if (FuncInfo->MBB->isSuccessor(MBB: PHIBB))
1980 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pi].second).addMBB(MBB: FuncInfo->MBB);
1981 }
1982 }
1983 SDB->SL->JTCases.clear();
1984
1985 // If we generated any switch lowering information, build and codegen any
1986 // additional DAGs necessary.
1987 for (unsigned i = 0, e = SDB->SL->SwitchCases.size(); i != e; ++i) {
1988 // Set the current basic block to the mbb we wish to insert the code into
1989 FuncInfo->MBB = SDB->SL->SwitchCases[i].ThisBB;
1990 FuncInfo->InsertPt = FuncInfo->MBB->end();
1991
1992 // Determine the unique successors.
1993 SmallVector<MachineBasicBlock *, 2> Succs;
1994 Succs.push_back(Elt: SDB->SL->SwitchCases[i].TrueBB);
1995 if (SDB->SL->SwitchCases[i].TrueBB != SDB->SL->SwitchCases[i].FalseBB)
1996 Succs.push_back(Elt: SDB->SL->SwitchCases[i].FalseBB);
1997
1998 // Emit the code. Note that this could result in FuncInfo->MBB being split.
1999 SDB->visitSwitchCase(CB&: SDB->SL->SwitchCases[i], SwitchBB: FuncInfo->MBB);
2000 CurDAG->setRoot(SDB->getRoot());
2001 SDB->clear();
2002 CodeGenAndEmitDAG();
2003
2004 // Remember the last block, now that any splitting is done, for use in
2005 // populating PHI nodes in successors.
2006 MachineBasicBlock *ThisBB = FuncInfo->MBB;
2007
2008 // Handle any PHI nodes in successors of this chunk, as if we were coming
2009 // from the original BB before switch expansion. Note that PHI nodes can
2010 // occur multiple times in PHINodesToUpdate. We have to be very careful to
2011 // handle them the right number of times.
2012 for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
2013 FuncInfo->MBB = Succs[i];
2014 FuncInfo->InsertPt = FuncInfo->MBB->end();
2015 // FuncInfo->MBB may have been removed from the CFG if a branch was
2016 // constant folded.
2017 if (ThisBB->isSuccessor(MBB: FuncInfo->MBB)) {
2018 for (MachineBasicBlock::iterator
2019 MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
2020 MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
2021 MachineInstrBuilder PHI(*MF, MBBI);
2022 // This value for this PHI node is recorded in PHINodesToUpdate.
2023 for (unsigned pn = 0; ; ++pn) {
2024 assert(pn != FuncInfo->PHINodesToUpdate.size() &&
2025 "Didn't find PHI entry!");
2026 if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
2027 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pn].second).addMBB(MBB: ThisBB);
2028 break;
2029 }
2030 }
2031 }
2032 }
2033 }
2034 }
2035 SDB->SL->SwitchCases.clear();
2036}
2037
2038/// Create the scheduler. If a specific scheduler was specified
2039/// via the SchedulerRegistry, use it, otherwise select the
2040/// one preferred by the target.
2041///
2042ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
2043 return ISHeuristic(this, OptLevel);
2044}
2045
2046//===----------------------------------------------------------------------===//
2047// Helper functions used by the generated instruction selector.
2048//===----------------------------------------------------------------------===//
2049// Calls to these methods are generated by tblgen.
2050
2051/// CheckAndMask - The isel is trying to match something like (and X, 255). If
2052/// the dag combiner simplified the 255, we still want to match. RHS is the
2053/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2054/// specified in the .td file (e.g. 255).
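/// For example, if the pattern expects mask 255 but the DAG contains
/// (and X, 254), the match can still succeed when bit 0 of X is known to be
/// zero, since the missing bit is already known to be clear.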
2055bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
2056 int64_t DesiredMaskS) const {
2057 const APInt &ActualMask = RHS->getAPIntValue();
2058 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2059
2060 // If the actual mask exactly matches, success!
2061 if (ActualMask == DesiredMask)
2062 return true;
2063
2064 // If the actual AND mask allows disallowed bits, this doesn't match.
2065 if (!ActualMask.isSubsetOf(RHS: DesiredMask))
2066 return false;
2067
2068 // Otherwise, the DAG Combiner may have proven that the value coming in is
2069 // either already zero or is not demanded. Check for known zero input bits.
2070 APInt NeededMask = DesiredMask & ~ActualMask;
2071 if (CurDAG->MaskedValueIsZero(Op: LHS, Mask: NeededMask))
2072 return true;
2073
2074 // TODO: check to see if missing bits are just not demanded.
2075
2076 // Otherwise, this pattern doesn't match.
2077 return false;
2078}
2079
2080/// CheckOrMask - The isel is trying to match something like (or X, 255). If
2081/// the dag combiner simplified the 255, we still want to match. RHS is the
2082/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2083/// specified in the .td file (e.g. 255).
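/// For example, if the pattern expects mask 255 but the DAG contains
/// (or X, 254), the match can still succeed when bit 0 of X is known to be
/// one.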
2084bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2085 int64_t DesiredMaskS) const {
2086 const APInt &ActualMask = RHS->getAPIntValue();
2087 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2088
2089 // If the actual mask exactly matches, success!
2090 if (ActualMask == DesiredMask)
2091 return true;
2092
2093 // If the actual OR mask sets disallowed bits, this doesn't match.
2094 if (!ActualMask.isSubsetOf(RHS: DesiredMask))
2095 return false;
2096
2097 // Otherwise, the DAG Combiner may have proven that the value coming in is
2098 // either already zero or is not demanded. Check for known zero input bits.
2099 APInt NeededMask = DesiredMask & ~ActualMask;
2100 KnownBits Known = CurDAG->computeKnownBits(Op: LHS);
2101
2102 // If all the missing bits in the or are already known to be set, match!
2103 if (NeededMask.isSubsetOf(RHS: Known.One))
2104 return true;
2105
2106 // TODO: check to see if missing bits are just not demanded.
2107
2108 // Otherwise, this pattern doesn't match.
2109 return false;
2110}
2111
2112/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2113/// by tblgen. Others should not call it.
2114void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2115 const SDLoc &DL) {
2116 std::vector<SDValue> InOps;
2117 std::swap(x&: InOps, y&: Ops);
2118
2119 Ops.push_back(x: InOps[InlineAsm::Op_InputChain]); // 0
2120 Ops.push_back(x: InOps[InlineAsm::Op_AsmString]); // 1
2121 Ops.push_back(x: InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
2122 Ops.push_back(x: InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
2123
2124 unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2125 if (InOps[e-1].getValueType() == MVT::Glue)
2126 --e; // Don't process a glue operand if it is here.
2127
2128 while (i != e) {
2129 InlineAsm::Flag Flags(InOps[i]->getAsZExtVal());
2130 if (!Flags.isMemKind() && !Flags.isFuncKind()) {
2131 // Just skip over this operand, copying the operands verbatim.
2132 Ops.insert(position: Ops.end(), first: InOps.begin() + i,
2133 last: InOps.begin() + i + Flags.getNumOperandRegisters() + 1);
2134 i += Flags.getNumOperandRegisters() + 1;
2135 } else {
2136 assert(Flags.getNumOperandRegisters() == 1 &&
2137 "Memory operand with multiple values?");
2138
2139 unsigned TiedToOperand;
2140 if (Flags.isUseOperandTiedToDef(Idx&: TiedToOperand)) {
2141 // We need the constraint ID from the operand this is tied to.
2142 unsigned CurOp = InlineAsm::Op_FirstOperand;
2143 Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
2144 for (; TiedToOperand; --TiedToOperand) {
2145 CurOp += Flags.getNumOperandRegisters() + 1;
2146 Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
2147 }
2148 }
2149
2150 // Otherwise, this is a memory operand. Ask the target to select it.
2151 std::vector<SDValue> SelOps;
2152 const InlineAsm::ConstraintCode ConstraintID =
2153 Flags.getMemoryConstraintID();
2154 if (SelectInlineAsmMemoryOperand(Op: InOps[i+1], ConstraintID, OutOps&: SelOps))
2155 report_fatal_error(reason: "Could not match memory address. Inline asm"
2156 " failure!");
2157
2158 // Add this to the output node.
2159 Flags = InlineAsm::Flag(Flags.isMemKind() ? InlineAsm::Kind::Mem
2160 : InlineAsm::Kind::Func,
2161 SelOps.size());
2162 Flags.setMemConstraint(ConstraintID);
2163 Ops.push_back(CurDAG->getTargetConstant(Flags, DL, MVT::i32));
2164 llvm::append_range(C&: Ops, R&: SelOps);
2165 i += 2;
2166 }
2167 }
2168
2169 // Add the glue input back if present.
2170 if (e != InOps.size())
2171 Ops.push_back(x: InOps.back());
2172}
2173
2174/// findGlueUse - Return use of MVT::Glue value produced by the specified
2175/// SDNode.
2176///
2177static SDNode *findGlueUse(SDNode *N) {
2178 unsigned FlagResNo = N->getNumValues()-1;
2179 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2180 SDUse &Use = I.getUse();
2181 if (Use.getResNo() == FlagResNo)
2182 return Use.getUser();
2183 }
2184 return nullptr;
2185}
2186
2187/// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path
2188/// beyond "ImmedUse". We may ignore chains as they are checked separately.
2189static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
2190 bool IgnoreChains) {
2191 SmallPtrSet<const SDNode *, 16> Visited;
2192 SmallVector<const SDNode *, 16> WorkList;
2193 // Only check if we have non-immediate uses of Def.
2194 if (ImmedUse->isOnlyUserOf(N: Def))
2195 return false;
2196
2197 // We don't care about paths to Def that go through ImmedUse, so mark it
2198 // visited and seed the worklist with its non-Def operands.
2199 Visited.insert(Ptr: ImmedUse);
2200 for (const SDValue &Op : ImmedUse->op_values()) {
2201 SDNode *N = Op.getNode();
2202 // Ignore chain deps (they are validated by
2203 // HandleMergeInputChains) and immediate uses
2204 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2205 continue;
2206 if (!Visited.insert(Ptr: N).second)
2207 continue;
2208 WorkList.push_back(Elt: N);
2209 }
2210
2211 // Initialize worklist to operands of Root.
2212 if (Root != ImmedUse) {
2213 for (const SDValue &Op : Root->op_values()) {
2214 SDNode *N = Op.getNode();
2215 // Ignore chains (they are validated by HandleMergeInputChains)
2216 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2217 continue;
2218 if (!Visited.insert(Ptr: N).second)
2219 continue;
2220 WorkList.push_back(Elt: N);
2221 }
2222 }
2223
2224 return SDNode::hasPredecessorHelper(N: Def, Visited, Worklist&: WorkList, MaxSteps: 0, TopologicalPrune: true);
2225}
2226
2227/// IsProfitableToFold - Returns true if it's profitable to fold the specific
2228/// operand node N of U during instruction selection that starts at Root.
2229bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
2230 SDNode *Root) const {
2231 if (OptLevel == CodeGenOptLevel::None)
2232 return false;
2233 return N.hasOneUse();
2234}
2235
2236/// IsLegalToFold - Returns true if the specific operand node N of
2237/// U can be folded during instruction selection that starts at Root.
2238bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2239 CodeGenOptLevel OptLevel,
2240 bool IgnoreChains) {
2241 if (OptLevel == CodeGenOptLevel::None)
2242 return false;
2243
2244 // If Root use can somehow reach N through a path that doesn't contain
2245 // U then folding N would create a cycle. e.g. In the following
2246 // diagram, Root can reach N through X. If N is folded into Root, then
2247 // X is both a predecessor and a successor of U.
2248 //
2249 // [N*] //
2250 // ^ ^ //
2251 // / \ //
2252 // [U*] [X]? //
2253 // ^ ^ //
2254 // \ / //
2255 // \ / //
2256 // [Root*] //
2257 //
2258 // * indicates nodes to be folded together.
2259 //
2260 // If Root produces glue, then it gets (even more) interesting. Since it
2261 // will be "glued" together with its glue use in the scheduler, we need to
2262 // check if it might reach N.
2263 //
2264 // [N*] //
2265 // ^ ^ //
2266 // / \ //
2267 // [U*] [X]? //
2268 // ^ ^ //
2269 // \ \ //
2270 // \ | //
2271 // [Root*] | //
2272 // ^ | //
2273 // f | //
2274 // | / //
2275 // [Y] / //
2276 // ^ / //
2277 // f / //
2278 // | / //
2279 // [GU] //
2280 //
2281 // If GU (glue use) indirectly reaches N (the load), and Root folds N
2282 // (call it Fold), then X is a predecessor of GU and a successor of
2283 // Fold. But since Fold and GU are glued together, this will create
2284 // a cycle in the scheduling graph.
2285
2286 // If the node has glue, walk down the graph to the "lowest" node in the
2287 // glued set.
2288 EVT VT = Root->getValueType(ResNo: Root->getNumValues()-1);
2289 while (VT == MVT::Glue) {
2290 SDNode *GU = findGlueUse(N: Root);
2291 if (!GU)
2292 break;
2293 Root = GU;
2294 VT = Root->getValueType(ResNo: Root->getNumValues()-1);
2295
2296 // If our query node has a glue result with a use, we've walked up it. If
2297 // the user (which has already been selected) has a chain or indirectly uses
2298 // the chain, HandleMergeInputChains will not consider it. Because of
2299 // this, we cannot ignore chains in this predicate.
2300 IgnoreChains = false;
2301 }
2302
2303 return !findNonImmUse(Root, Def: N.getNode(), ImmedUse: U, IgnoreChains);
2304}
2305
2306void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
2307 SDLoc DL(N);
2308
2309 std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2310 SelectInlineAsmMemoryOperands(Ops, DL);
2311
2312 const EVT VTs[] = {MVT::Other, MVT::Glue};
2313 SDValue New = CurDAG->getNode(N->getOpcode(), DL, VTs, Ops);
2314 New->setNodeId(-1);
2315 ReplaceUses(F: N, T: New.getNode());
2316 CurDAG->RemoveDeadNode(N);
2317}
2318
2319void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2320 SDLoc dl(Op);
2321 MDNodeSDNode *MD = cast<MDNodeSDNode>(Val: Op->getOperand(Num: 1));
2322 const MDString *RegStr = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
2323
2324 EVT VT = Op->getValueType(ResNo: 0);
2325 LLT Ty = VT.isSimple() ? getLLTForMVT(Ty: VT.getSimpleVT()) : LLT();
2326 Register Reg =
2327 TLI->getRegisterByName(RegName: RegStr->getString().data(), Ty,
2328 MF: CurDAG->getMachineFunction());
2329 SDValue New = CurDAG->getCopyFromReg(
2330 Chain: Op->getOperand(Num: 0), dl, Reg, VT: Op->getValueType(ResNo: 0));
2331 New->setNodeId(-1);
2332 ReplaceUses(F: Op, T: New.getNode());
2333 CurDAG->RemoveDeadNode(N: Op);
2334}
2335
2336void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2337 SDLoc dl(Op);
2338 MDNodeSDNode *MD = cast<MDNodeSDNode>(Val: Op->getOperand(Num: 1));
2339 const MDString *RegStr = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
2340
2341 EVT VT = Op->getOperand(Num: 2).getValueType();
2342 LLT Ty = VT.isSimple() ? getLLTForMVT(Ty: VT.getSimpleVT()) : LLT();
2343
2344 Register Reg = TLI->getRegisterByName(RegName: RegStr->getString().data(), Ty,
2345 MF: CurDAG->getMachineFunction());
2346 SDValue New = CurDAG->getCopyToReg(
2347 Chain: Op->getOperand(Num: 0), dl, Reg, N: Op->getOperand(Num: 2));
2348 New->setNodeId(-1);
2349 ReplaceUses(F: Op, T: New.getNode());
2350 CurDAG->RemoveDeadNode(N: Op);
2351}
2352
2353void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2354 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::IMPLICIT_DEF, VT: N->getValueType(ResNo: 0));
2355}
2356
2357void SelectionDAGISel::Select_FREEZE(SDNode *N) {
2358 // TODO: We don't have FREEZE pseudo-instruction in MachineInstr-level now.
2359 // If FREEZE instruction is added later, the code below must be changed as
2360 // well.
2361 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::COPY, VT: N->getValueType(ResNo: 0),
2362 Op1: N->getOperand(Num: 0));
2363}
2364
2365void SelectionDAGISel::Select_ARITH_FENCE(SDNode *N) {
2366 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::ARITH_FENCE, VT: N->getValueType(ResNo: 0),
2367 Op1: N->getOperand(Num: 0));
2368}
2369
2370void SelectionDAGISel::Select_MEMBARRIER(SDNode *N) {
2371 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::MEMBARRIER, VT: N->getValueType(ResNo: 0),
2372 Op1: N->getOperand(Num: 0));
2373}
2374
2375void SelectionDAGISel::Select_CONVERGENCECTRL_ANCHOR(SDNode *N) {
2376 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::CONVERGENCECTRL_ANCHOR,
2377 VT: N->getValueType(ResNo: 0));
2378}
2379
2380void SelectionDAGISel::Select_CONVERGENCECTRL_ENTRY(SDNode *N) {
2381 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::CONVERGENCECTRL_ENTRY,
2382 VT: N->getValueType(ResNo: 0));
2383}
2384
2385void SelectionDAGISel::Select_CONVERGENCECTRL_LOOP(SDNode *N) {
2386 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::CONVERGENCECTRL_LOOP,
2387 VT: N->getValueType(ResNo: 0), Op1: N->getOperand(Num: 0));
2388}
2389
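/// Append a live-variable operand for a STACKMAP or PATCHPOINT node to Ops.
/// Constants are emitted as a StackMaps::ConstantOp marker followed by the
/// value as a target constant; all other values are passed through unchanged.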
2390void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
2391 SDValue OpVal, SDLoc DL) {
2392 SDNode *OpNode = OpVal.getNode();
2393
2394 // FrameIndex nodes should have been directly emitted to TargetFrameIndex
2395 // nodes at DAG-construction time.
2396 assert(OpNode->getOpcode() != ISD::FrameIndex);
2397
2398 if (OpNode->getOpcode() == ISD::Constant) {
2399 Ops.push_back(
2400 Elt: CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
2401 Ops.push_back(Elt: CurDAG->getTargetConstant(Val: OpNode->getAsZExtVal(), DL,
2402 VT: OpVal.getValueType()));
2403 } else {
2404 Ops.push_back(Elt: OpVal);
2405 }
2406}
2407
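/// Lower an ISD::STACKMAP node to TargetOpcode::STACKMAP, reordering the
/// operands so that the chain and glue come last: <id>, <numShadowBytes>,
/// live variables..., chain, glue.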
2408void SelectionDAGISel::Select_STACKMAP(SDNode *N) {
2409 SmallVector<SDValue, 32> Ops;
2410 auto *It = N->op_begin();
2411 SDLoc DL(N);
2412
2413 // Stash the chain and glue operands so we can move them to the end.
2414 SDValue Chain = *It++;
2415 SDValue InGlue = *It++;
2416
2417 // <id> operand.
2418 SDValue ID = *It++;
2419 assert(ID.getValueType() == MVT::i64);
2420 Ops.push_back(Elt: ID);
2421
2422 // <numShadowBytes> operand.
2423 SDValue Shad = *It++;
2424 assert(Shad.getValueType() == MVT::i32);
2425 Ops.push_back(Elt: Shad);
2426
2427 // Live variable operands.
2428 for (; It != N->op_end(); It++)
2429 pushStackMapLiveVariable(Ops, OpVal: *It, DL);
2430
2431 Ops.push_back(Elt: Chain);
2432 Ops.push_back(Elt: InGlue);
2433
2434 SDVTList NodeTys = CurDAG->getVTList(MVT::Other, MVT::Glue);
2435 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::STACKMAP, VTs: NodeTys, Ops);
2436}
2437
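/// Lower an ISD::PATCHPOINT node to TargetOpcode::PATCHPOINT. The chain,
/// optional glue and register mask are stashed up front and moved to the end
/// of the operand list, after the callee, call arguments and live variables.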
2438void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
2439 SmallVector<SDValue, 32> Ops;
2440 auto *It = N->op_begin();
2441 SDLoc DL(N);
2442
2443 // Cache arguments that will be moved to the end in the target node.
2444 SDValue Chain = *It++;
2445 std::optional<SDValue> Glue;
2446 if (It->getValueType() == MVT::Glue)
2447 Glue = *It++;
2448 SDValue RegMask = *It++;
2449
2450 // <id> operand.
2451 SDValue ID = *It++;
2452 assert(ID.getValueType() == MVT::i64);
2453 Ops.push_back(Elt: ID);
2454
2455 // <numShadowBytes> operand.
2456 SDValue Shad = *It++;
2457 assert(Shad.getValueType() == MVT::i32);
2458 Ops.push_back(Elt: Shad);
2459
2460 // Add the callee.
2461 Ops.push_back(Elt: *It++);
2462
2463 // Add <numArgs>.
2464 SDValue NumArgs = *It++;
2465 assert(NumArgs.getValueType() == MVT::i32);
2466 Ops.push_back(Elt: NumArgs);
2467
2468 // Calling convention.
2469 Ops.push_back(Elt: *It++);
2470
2471 // Push the args for the call.
2472 for (uint64_t I = NumArgs->getAsZExtVal(); I != 0; I--)
2473 Ops.push_back(Elt: *It++);
2474
2475 // Now push the live variables.
2476 for (; It != N->op_end(); It++)
2477 pushStackMapLiveVariable(Ops, OpVal: *It, DL);
2478
2479 // Finally, the regmask, chain and (if present) glue are moved to the end.
2480 Ops.push_back(Elt: RegMask);
2481 Ops.push_back(Elt: Chain);
2482 if (Glue.has_value())
2483 Ops.push_back(Elt: *Glue);
2484
2485 SDVTList NodeTys = N->getVTList();
2486 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::PATCHPOINT, VTs: NodeTys, Ops);
2487}
2488
2489/// GetVBR - decode a vbr encoding whose top bit is set.
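/// For example, a first byte of 0x83 followed by a table byte of 0x01 decodes
/// to 3 | (1 << 7) = 131.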
2490LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
2491GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2492 assert(Val >= 128 && "Not a VBR");
2493 Val &= 127; // Remove first vbr bit.
2494
2495 unsigned Shift = 7;
2496 uint64_t NextBits;
2497 do {
2498 NextBits = MatcherTable[Idx++];
2499 Val |= (NextBits&127) << Shift;
2500 Shift += 7;
2501 } while (NextBits & 128);
2502
2503 return Val;
2504}
2505
2506void SelectionDAGISel::Select_JUMP_TABLE_DEBUG_INFO(SDNode *N) {
2507 SDLoc dl(N);
2508 CurDAG->SelectNodeTo(N, TargetOpcode::JUMP_TABLE_DEBUG_INFO, MVT::Glue,
2509 CurDAG->getTargetConstant(N->getConstantOperandVal(Num: 1),
2510 dl, MVT::i64, true));
2511}
2512
2513/// When a match is complete, this method updates uses of interior chain results
2514/// to use the new results.
2515void SelectionDAGISel::UpdateChains(
2516 SDNode *NodeToMatch, SDValue InputChain,
2517 SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2518 SmallVector<SDNode*, 4> NowDeadNodes;
2519
2520 // Now that all the normal results are replaced, we replace the chain and
2521 // glue results if present.
2522 if (!ChainNodesMatched.empty()) {
2523 assert(InputChain.getNode() &&
2524 "Matched input chains but didn't produce a chain");
2525 // Loop over all of the nodes we matched that produced a chain result.
2526 // Replace all the chain results with the final chain we ended up with.
2527 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2528 SDNode *ChainNode = ChainNodesMatched[i];
2529 // If ChainNode is null, it's because we replaced it on a previous
2530 // iteration and we cleared it out of the map. Just skip it.
2531 if (!ChainNode)
2532 continue;
2533
2534 assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2535 "Deleted node left in chain");
2536
2537 // Don't replace the results of the root node if we're doing a
2538 // MorphNodeTo.
2539 if (ChainNode == NodeToMatch && isMorphNodeTo)
2540 continue;
2541
2542 SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2543 if (ChainVal.getValueType() == MVT::Glue)
2544 ChainVal = ChainVal.getValue(R: ChainVal->getNumValues()-2);
2545 assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2546 SelectionDAG::DAGNodeDeletedListener NDL(
2547 *CurDAG, [&](SDNode *N, SDNode *E) {
2548 std::replace(first: ChainNodesMatched.begin(), last: ChainNodesMatched.end(), old_value: N,
2549 new_value: static_cast<SDNode *>(nullptr));
2550 });
2551 if (ChainNode->getOpcode() != ISD::TokenFactor)
2552 ReplaceUses(F: ChainVal, T: InputChain);
2553
2554 // If the node became dead and we haven't already seen it, delete it.
2555 if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2556 !llvm::is_contained(Range&: NowDeadNodes, Element: ChainNode))
2557 NowDeadNodes.push_back(Elt: ChainNode);
2558 }
2559 }
2560
2561 if (!NowDeadNodes.empty())
2562 CurDAG->RemoveDeadNodes(DeadNodes&: NowDeadNodes);
2563
2564 LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
2565}
2566
2567/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2568/// operation for when the pattern matched at least one node with a chain. The
2569/// input vector contains a list of all of the chained nodes that we match. We
2570/// must determine if this is a valid thing to cover (i.e. matching it won't
2571/// induce cycles in the DAG) and if so, create a TokenFactor node that will be
2572/// used as the input node chain for the generated nodes.
2573static SDValue
2574HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
2575 SelectionDAG *CurDAG) {
2576
2577 SmallPtrSet<const SDNode *, 16> Visited;
2578 SmallVector<const SDNode *, 8> Worklist;
2579 SmallVector<SDValue, 3> InputChains;
2580 unsigned int Max = 8192;
2581
2582 // Quick exit on trivial merge.
2583 if (ChainNodesMatched.size() == 1)
2584 return ChainNodesMatched[0]->getOperand(Num: 0);
2585
2586 // Add chains that aren't already added (internal). Peek through
2587 // token factors.
2588 std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2589 if (V.getValueType() != MVT::Other)
2590 return;
2591 if (V->getOpcode() == ISD::EntryToken)
2592 return;
2593 if (!Visited.insert(Ptr: V.getNode()).second)
2594 return;
2595 if (V->getOpcode() == ISD::TokenFactor) {
2596 for (const SDValue &Op : V->op_values())
2597 AddChains(Op);
2598 } else
2599 InputChains.push_back(Elt: V);
2600 };
2601
2602 for (auto *N : ChainNodesMatched) {
2603 Worklist.push_back(Elt: N);
2604 Visited.insert(Ptr: N);
2605 }
2606
2607 while (!Worklist.empty())
2608 AddChains(Worklist.pop_back_val()->getOperand(Num: 0));
2609
2610 // Skip the search if there are no chain dependencies.
2611 if (InputChains.size() == 0)
2612 return CurDAG->getEntryNode();
2613
2614 // If any of the matched nodes is a predecessor of one of the input chains,
2615 // some node is both a predecessor and a successor of the to-be-merged nodes,
2616 // so merging would create a cycle. Fail.
2617 Visited.clear();
2618 for (SDValue V : InputChains)
2619 Worklist.push_back(Elt: V.getNode());
2620
2621 for (auto *N : ChainNodesMatched)
2622 if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps: Max, TopologicalPrune: true))
2623 return SDValue();
2624
2625 // Return merged chain.
2626 if (InputChains.size() == 1)
2627 return InputChains[0];
2628 return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2629 MVT::Other, InputChains);
2630}
2631
2632/// MorphNode - Handle morphing a node in place for the selector.
2633SDNode *SelectionDAGISel::
2634MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2635 ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2636 // It is possible we're using MorphNodeTo to replace a node with no
2637 // normal results with one that has a normal result (or we could be
2638 // adding a chain) and the input could have glue and chains as well.
2639 // In this case we need to shift the operands down.
2640 // FIXME: This is a horrible hack and broken in obscure cases, no worse
2641 // than the old isel though.
2642 int OldGlueResultNo = -1, OldChainResultNo = -1;
2643
2644 unsigned NTMNumResults = Node->getNumValues();
2645 if (Node->getValueType(ResNo: NTMNumResults-1) == MVT::Glue) {
2646 OldGlueResultNo = NTMNumResults-1;
2647 if (NTMNumResults != 1 &&
2648 Node->getValueType(ResNo: NTMNumResults-2) == MVT::Other)
2649 OldChainResultNo = NTMNumResults-2;
2650 } else if (Node->getValueType(ResNo: NTMNumResults-1) == MVT::Other)
2651 OldChainResultNo = NTMNumResults-1;
2652
2653 // Call the underlying SelectionDAG routine to do the transmogrification. Note
2654 // that this deletes operands of the old node that become dead.
2655 SDNode *Res = CurDAG->MorphNodeTo(N: Node, Opc: ~TargetOpc, VTs: VTList, Ops);
2656
2657 // MorphNodeTo can operate in two ways: if an existing node with the
2658 // specified operands exists, it can just return it. Otherwise, it
2659 // updates the node in place to have the requested operands.
2660 if (Res == Node) {
2661 // If we updated the node in place, reset the node ID. To the isel,
2662 // this should be just like a newly allocated machine node.
2663 Res->setNodeId(-1);
2664 }
2665
2666 unsigned ResNumResults = Res->getNumValues();
2667 // Move the glue if needed.
2668 if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2669 static_cast<unsigned>(OldGlueResultNo) != ResNumResults - 1)
2670 ReplaceUses(F: SDValue(Node, OldGlueResultNo),
2671 T: SDValue(Res, ResNumResults - 1));
2672
2673 if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2674 --ResNumResults;
2675
2676 // Move the chain reference if needed.
2677 if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2678 static_cast<unsigned>(OldChainResultNo) != ResNumResults - 1)
2679 ReplaceUses(F: SDValue(Node, OldChainResultNo),
2680 T: SDValue(Res, ResNumResults - 1));
2681
2682 // If an equivalent node already existed, MorphNodeTo returned it instead of
2683 // updating Node in place; replace all uses of the old node with the new one.
2684 if (Res != Node) {
2685 ReplaceNode(F: Node, T: Res);
2686 } else {
2687 EnforceNodeIdInvariant(Node: Res);
2688 }
2689
2690 return Res;
2691}
2692
2693/// CheckSame - Implements OP_CheckSame.
2694LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2695CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2696 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes) {
2697 // Accept if it is exactly the same as a previously recorded node.
2698 unsigned RecNo = MatcherTable[MatcherIndex++];
2699 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2700 return N == RecordedNodes[RecNo].first;
2701}
2702
2703/// CheckChildSame - Implements OP_CheckChildXSame.
2704LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
2705 const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2706 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes,
2707 unsigned ChildNo) {
2708 if (ChildNo >= N.getNumOperands())
2709 return false; // Match fails if out of range child #.
2710 return ::CheckSame(MatcherTable, MatcherIndex, N: N.getOperand(i: ChildNo),
2711 RecordedNodes);
2712}
2713
2714/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
2715LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2716CheckPatternPredicate(unsigned Opcode, const unsigned char *MatcherTable,
2717 unsigned &MatcherIndex, const SelectionDAGISel &SDISel) {
2718 bool TwoBytePredNo =
2719 Opcode == SelectionDAGISel::OPC_CheckPatternPredicateTwoByte;
2720 unsigned PredNo =
2721 TwoBytePredNo || Opcode == SelectionDAGISel::OPC_CheckPatternPredicate
2722 ? MatcherTable[MatcherIndex++]
2723 : Opcode - SelectionDAGISel::OPC_CheckPatternPredicate0;
2724 if (TwoBytePredNo)
2725 PredNo |= MatcherTable[MatcherIndex++] << 8;
2726 return SDISel.CheckPatternPredicate(PredNo);
2727}
2728
2729/// CheckNodePredicate - Implements OP_CheckNodePredicate.
2730LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2731CheckNodePredicate(unsigned Opcode, const unsigned char *MatcherTable,
2732 unsigned &MatcherIndex, const SelectionDAGISel &SDISel,
2733 SDNode *N) {
2734 unsigned PredNo = Opcode == SelectionDAGISel::OPC_CheckPredicate
2735 ? MatcherTable[MatcherIndex++]
2736 : Opcode - SelectionDAGISel::OPC_CheckPredicate0;
2737 return SDISel.CheckNodePredicate(N, PredNo);
2738}
2739
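/// CheckOpcode - Implements OP_CheckOpcode.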
2740LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2741CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2742 SDNode *N) {
2743 uint16_t Opc = MatcherTable[MatcherIndex++];
2744 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
2745 return N->getOpcode() == Opc;
2746}
2747
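/// CheckType - Implements OP_CheckType. iPTR is matched against the target's
/// pointer type.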
2748LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckType(MVT::SimpleValueType VT,
2749 SDValue N,
2750 const TargetLowering *TLI,
2751 const DataLayout &DL) {
2752 if (N.getValueType() == VT)
2753 return true;
2754
2755 // Handle the case when VT is iPTR.
2756 return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2757}
2758
2759LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2760CheckChildType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI,
2761 const DataLayout &DL, unsigned ChildNo) {
2762 if (ChildNo >= N.getNumOperands())
2763 return false; // Match fails if out of range child #.
2764 return ::CheckType(VT, N: N.getOperand(i: ChildNo), TLI, DL);
2765}
2766
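/// CheckCondCode - Implements OP_CheckCondCode.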
2767LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2768CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2769 SDValue N) {
2770 return cast<CondCodeSDNode>(Val&: N)->get() ==
2771 static_cast<ISD::CondCode>(MatcherTable[MatcherIndex++]);
2772}
2773
2774LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2775CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2776 SDValue N) {
2777 if (2 >= N.getNumOperands())
2778 return false;
2779 return ::CheckCondCode(MatcherTable, MatcherIndex, N: N.getOperand(i: 2));
2780}
2781
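/// CheckValueType - Implements OP_CheckValueType.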
2782LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2783CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2784 SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2785 MVT::SimpleValueType VT =
2786 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
2787 if (cast<VTSDNode>(Val&: N)->getVT() == VT)
2788 return true;
2789
2790 // Handle the case when VT is iPTR.
2791 return VT == MVT::iPTR && cast<VTSDNode>(Val&: N)->getVT() == TLI->getPointerTy(DL);
2792}
2793
2794// Bit 0 stores the sign of the immediate. The upper bits contain the magnitude
2795// shifted left by 1.
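// For example, 6 decodes to 3, 7 decodes to -3, and 1 decodes to INT64_MIN.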
2796static uint64_t decodeSignRotatedValue(uint64_t V) {
2797 if ((V & 1) == 0)
2798 return V >> 1;
2799 if (V != 1)
2800 return -(V >> 1);
2801 // There is no such thing as -0 with integers. "-0" really means MININT.
2802 return 1ULL << 63;
2803}
2804
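/// CheckInteger - Implements OP_CheckInteger.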
2805LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2806CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2807 SDValue N) {
2808 int64_t Val = MatcherTable[MatcherIndex++];
2809 if (Val & 128)
2810 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
2811
2812 Val = decodeSignRotatedValue(V: Val);
2813
2814 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: N);
2815 return C && C->getAPIntValue().trySExtValue() == Val;
2816}
2817
2818LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2819CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2820 SDValue N, unsigned ChildNo) {
2821 if (ChildNo >= N.getNumOperands())
2822 return false; // Match fails if out of range child #.
2823 return ::CheckInteger(MatcherTable, MatcherIndex, N: N.getOperand(i: ChildNo));
2824}
2825
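/// CheckAndImm - Implements OP_CheckAndImm.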
2826LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2827CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2828 SDValue N, const SelectionDAGISel &SDISel) {
2829 int64_t Val = MatcherTable[MatcherIndex++];
2830 if (Val & 128)
2831 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
2832
2833 if (N->getOpcode() != ISD::AND) return false;
2834
2835 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
2836 return C && SDISel.CheckAndMask(LHS: N.getOperand(i: 0), RHS: C, DesiredMaskS: Val);
2837}
2838
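/// CheckOrImm - Implements OP_CheckOrImm.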
2839LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2840CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2841 const SelectionDAGISel &SDISel) {
2842 int64_t Val = MatcherTable[MatcherIndex++];
2843 if (Val & 128)
2844 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
2845
2846 if (N->getOpcode() != ISD::OR) return false;
2847
2848 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
2849 return C && SDISel.CheckOrMask(LHS: N.getOperand(i: 0), RHS: C, DesiredMaskS: Val);
2850}
2851
2852/// IsPredicateKnownToFail - If we know how and can do so without pushing a
2853/// scope, evaluate the current node. If the current predicate is known to
2854/// fail, set Result=true and return anything. If the current predicate is
2855/// known to pass, set Result=false and return the MatcherIndex to continue
2856/// with. If the current predicate is unknown, set Result=false and return the
2857/// MatcherIndex to continue with.
2858static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2859 unsigned Index, SDValue N,
2860 bool &Result,
2861 const SelectionDAGISel &SDISel,
2862 SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2863 unsigned Opcode = Table[Index++];
2864 switch (Opcode) {
2865 default:
2866 Result = false;
2867 return Index-1; // Could not evaluate this predicate.
2868 case SelectionDAGISel::OPC_CheckSame:
2869 Result = !::CheckSame(MatcherTable: Table, MatcherIndex&: Index, N, RecordedNodes);
2870 return Index;
2871 case SelectionDAGISel::OPC_CheckChild0Same:
2872 case SelectionDAGISel::OPC_CheckChild1Same:
2873 case SelectionDAGISel::OPC_CheckChild2Same:
2874 case SelectionDAGISel::OPC_CheckChild3Same:
2875 Result = !::CheckChildSame(MatcherTable: Table, MatcherIndex&: Index, N, RecordedNodes,
2876 ChildNo: Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
2877 return Index;
2878 case SelectionDAGISel::OPC_CheckPatternPredicate:
2879 case SelectionDAGISel::OPC_CheckPatternPredicate0:
2880 case SelectionDAGISel::OPC_CheckPatternPredicate1:
2881 case SelectionDAGISel::OPC_CheckPatternPredicate2:
2882 case SelectionDAGISel::OPC_CheckPatternPredicate3:
2883 case SelectionDAGISel::OPC_CheckPatternPredicate4:
2884 case SelectionDAGISel::OPC_CheckPatternPredicate5:
2885 case SelectionDAGISel::OPC_CheckPatternPredicate6:
2886 case SelectionDAGISel::OPC_CheckPatternPredicate7:
2887 case SelectionDAGISel::OPC_CheckPatternPredicateTwoByte:
2888 Result = !::CheckPatternPredicate(Opcode, MatcherTable: Table, MatcherIndex&: Index, SDISel);
2889 return Index;
2890 case SelectionDAGISel::OPC_CheckPredicate:
2891 case SelectionDAGISel::OPC_CheckPredicate0:
2892 case SelectionDAGISel::OPC_CheckPredicate1:
2893 case SelectionDAGISel::OPC_CheckPredicate2:
2894 case SelectionDAGISel::OPC_CheckPredicate3:
2895 case SelectionDAGISel::OPC_CheckPredicate4:
2896 case SelectionDAGISel::OPC_CheckPredicate5:
2897 case SelectionDAGISel::OPC_CheckPredicate6:
2898 case SelectionDAGISel::OPC_CheckPredicate7:
2899 Result = !::CheckNodePredicate(Opcode, MatcherTable: Table, MatcherIndex&: Index, SDISel, N: N.getNode());
2900 return Index;
2901 case SelectionDAGISel::OPC_CheckOpcode:
2902 Result = !::CheckOpcode(MatcherTable: Table, MatcherIndex&: Index, N: N.getNode());
2903 return Index;
2904 case SelectionDAGISel::OPC_CheckType:
2905 case SelectionDAGISel::OPC_CheckTypeI32:
2906 case SelectionDAGISel::OPC_CheckTypeI64: {
2907 MVT::SimpleValueType VT;
2908 switch (Opcode) {
2909 case SelectionDAGISel::OPC_CheckTypeI32:
2910 VT = MVT::i32;
2911 break;
2912 case SelectionDAGISel::OPC_CheckTypeI64:
2913 VT = MVT::i64;
2914 break;
2915 default:
2916 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
2917 break;
2918 }
2919 Result = !::CheckType(VT, N, TLI: SDISel.TLI, DL: SDISel.CurDAG->getDataLayout());
2920 return Index;
2921 }
2922 case SelectionDAGISel::OPC_CheckTypeRes: {
2923 unsigned Res = Table[Index++];
2924 Result = !::CheckType(VT: static_cast<MVT::SimpleValueType>(Table[Index++]),
2925 N: N.getValue(R: Res), TLI: SDISel.TLI,
2926 DL: SDISel.CurDAG->getDataLayout());
2927 return Index;
2928 }
2929 case SelectionDAGISel::OPC_CheckChild0Type:
2930 case SelectionDAGISel::OPC_CheckChild1Type:
2931 case SelectionDAGISel::OPC_CheckChild2Type:
2932 case SelectionDAGISel::OPC_CheckChild3Type:
2933 case SelectionDAGISel::OPC_CheckChild4Type:
2934 case SelectionDAGISel::OPC_CheckChild5Type:
2935 case SelectionDAGISel::OPC_CheckChild6Type:
2936 case SelectionDAGISel::OPC_CheckChild7Type:
2937 case SelectionDAGISel::OPC_CheckChild0TypeI32:
2938 case SelectionDAGISel::OPC_CheckChild1TypeI32:
2939 case SelectionDAGISel::OPC_CheckChild2TypeI32:
2940 case SelectionDAGISel::OPC_CheckChild3TypeI32:
2941 case SelectionDAGISel::OPC_CheckChild4TypeI32:
2942 case SelectionDAGISel::OPC_CheckChild5TypeI32:
2943 case SelectionDAGISel::OPC_CheckChild6TypeI32:
2944 case SelectionDAGISel::OPC_CheckChild7TypeI32:
2945 case SelectionDAGISel::OPC_CheckChild0TypeI64:
2946 case SelectionDAGISel::OPC_CheckChild1TypeI64:
2947 case SelectionDAGISel::OPC_CheckChild2TypeI64:
2948 case SelectionDAGISel::OPC_CheckChild3TypeI64:
2949 case SelectionDAGISel::OPC_CheckChild4TypeI64:
2950 case SelectionDAGISel::OPC_CheckChild5TypeI64:
2951 case SelectionDAGISel::OPC_CheckChild6TypeI64:
2952 case SelectionDAGISel::OPC_CheckChild7TypeI64: {
2953 MVT::SimpleValueType VT;
2954 unsigned ChildNo;
2955 if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
2956 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
2957 VT = MVT::i32;
2958 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
2959 } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
2960 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
2961 VT = MVT::i64;
2962 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
2963 } else {
2964 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
2965 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
2966 }
2967 Result = !::CheckChildType(VT, N, TLI: SDISel.TLI,
2968 DL: SDISel.CurDAG->getDataLayout(), ChildNo);
2969 return Index;
2970 }
2971 case SelectionDAGISel::OPC_CheckCondCode:
2972 Result = !::CheckCondCode(MatcherTable: Table, MatcherIndex&: Index, N);
2973 return Index;
2974 case SelectionDAGISel::OPC_CheckChild2CondCode:
2975 Result = !::CheckChild2CondCode(MatcherTable: Table, MatcherIndex&: Index, N);
2976 return Index;
2977 case SelectionDAGISel::OPC_CheckValueType:
2978 Result = !::CheckValueType(MatcherTable: Table, MatcherIndex&: Index, N, TLI: SDISel.TLI,
2979 DL: SDISel.CurDAG->getDataLayout());
2980 return Index;
2981 case SelectionDAGISel::OPC_CheckInteger:
2982 Result = !::CheckInteger(MatcherTable: Table, MatcherIndex&: Index, N);
2983 return Index;
2984 case SelectionDAGISel::OPC_CheckChild0Integer:
2985 case SelectionDAGISel::OPC_CheckChild1Integer:
2986 case SelectionDAGISel::OPC_CheckChild2Integer:
2987 case SelectionDAGISel::OPC_CheckChild3Integer:
2988 case SelectionDAGISel::OPC_CheckChild4Integer:
2989 Result = !::CheckChildInteger(MatcherTable: Table, MatcherIndex&: Index, N,
2990 ChildNo: Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
2991 return Index;
2992 case SelectionDAGISel::OPC_CheckAndImm:
2993 Result = !::CheckAndImm(MatcherTable: Table, MatcherIndex&: Index, N, SDISel);
2994 return Index;
2995 case SelectionDAGISel::OPC_CheckOrImm:
2996 Result = !::CheckOrImm(MatcherTable: Table, MatcherIndex&: Index, N, SDISel);
2997 return Index;
2998 }
2999}
3000
3001namespace {
3002
3003struct MatchScope {
3004 /// FailIndex - If this match fails, this is the index to continue with.
3005 unsigned FailIndex;
3006
3007 /// NodeStack - The node stack when the scope was formed.
3008 SmallVector<SDValue, 4> NodeStack;
3009
3010 /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
3011 unsigned NumRecordedNodes;
3012
3013 /// NumMatchedMemRefs - The number of matched memref entries.
3014 unsigned NumMatchedMemRefs;
3015
3016 /// InputChain/InputGlue - The current chain/glue
3017 SDValue InputChain, InputGlue;
3018
3019 /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
3020 bool HasChainNodesMatched;
3021};
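// Illustrative use of this snapshot (see the backtracking loop at the end of
// SelectCodeCommon below): when an alternative inside an OPC_Scope fails, the
// interpreter trims RecordedNodes and MatchedMemRefs back to
// NumRecordedNodes/NumMatchedMemRefs entries, reinstates NodeStack, InputChain
// and InputGlue, and resumes at FailIndex, which points at the scope's next
// alternative.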
3022
/// A DAG update listener that keeps the matching state
/// (i.e. RecordedNodes and MatchScope) up to date if the target is allowed to
/// change the DAG while matching. The X86 addressing mode matcher is an
/// example of this.
3027class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
3028{
3029 SDNode **NodeToMatch;
3030 SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
3031 SmallVectorImpl<MatchScope> &MatchScopes;
3032
3033public:
3034 MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
3035 SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
3036 SmallVectorImpl<MatchScope> &MS)
3037 : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
3038 RecordedNodes(RN), MatchScopes(MS) {}
3039
3040 void NodeDeleted(SDNode *N, SDNode *E) override {
3041 // Some early-returns here to avoid the search if we deleted the node or
3042 // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
3043 // do, so it's unnecessary to update matching state at that point).
    // Neither of these can occur currently because we only install this
    // update listener while matching a complex pattern.
3046 if (!E || E->isMachineOpcode())
3047 return;
3048 // Check if NodeToMatch was updated.
3049 if (N == *NodeToMatch)
3050 *NodeToMatch = E;
    // Performing a linear search here does not matter because we almost never
    // run this code: it would require CSE to occur during complex pattern
    // matching.
3054 for (auto &I : RecordedNodes)
3055 if (I.first.getNode() == N)
3056 I.first.setNode(E);
3057
3058 for (auto &I : MatchScopes)
3059 for (auto &J : I.NodeStack)
3060 if (J.getNode() == N)
3061 J.setNode(E);
3062 }
3063};
3064
3065} // end anonymous namespace
3066
3067void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
3068 const unsigned char *MatcherTable,
3069 unsigned TableSize) {
3070 // FIXME: Should these even be selected? Handle these cases in the caller?
3071 switch (NodeToMatch->getOpcode()) {
3072 default:
3073 break;
3074 case ISD::EntryToken: // These nodes remain the same.
3075 case ISD::BasicBlock:
3076 case ISD::Register:
3077 case ISD::RegisterMask:
3078 case ISD::HANDLENODE:
3079 case ISD::MDNODE_SDNODE:
3080 case ISD::TargetConstant:
3081 case ISD::TargetConstantFP:
3082 case ISD::TargetConstantPool:
3083 case ISD::TargetFrameIndex:
3084 case ISD::TargetExternalSymbol:
3085 case ISD::MCSymbol:
3086 case ISD::TargetBlockAddress:
3087 case ISD::TargetJumpTable:
3088 case ISD::TargetGlobalTLSAddress:
3089 case ISD::TargetGlobalAddress:
3090 case ISD::TokenFactor:
3091 case ISD::CopyFromReg:
3092 case ISD::CopyToReg:
3093 case ISD::EH_LABEL:
3094 case ISD::ANNOTATION_LABEL:
3095 case ISD::LIFETIME_START:
3096 case ISD::LIFETIME_END:
3097 case ISD::PSEUDO_PROBE:
3098 NodeToMatch->setNodeId(-1); // Mark selected.
3099 return;
3100 case ISD::AssertSext:
3101 case ISD::AssertZext:
3102 case ISD::AssertAlign:
3103 ReplaceUses(F: SDValue(NodeToMatch, 0), T: NodeToMatch->getOperand(Num: 0));
3104 CurDAG->RemoveDeadNode(N: NodeToMatch);
3105 return;
3106 case ISD::INLINEASM:
3107 case ISD::INLINEASM_BR:
3108 Select_INLINEASM(N: NodeToMatch);
3109 return;
3110 case ISD::READ_REGISTER:
3111 Select_READ_REGISTER(Op: NodeToMatch);
3112 return;
3113 case ISD::WRITE_REGISTER:
3114 Select_WRITE_REGISTER(Op: NodeToMatch);
3115 return;
3116 case ISD::UNDEF:
3117 Select_UNDEF(N: NodeToMatch);
3118 return;
3119 case ISD::FREEZE:
3120 Select_FREEZE(N: NodeToMatch);
3121 return;
3122 case ISD::ARITH_FENCE:
3123 Select_ARITH_FENCE(N: NodeToMatch);
3124 return;
3125 case ISD::MEMBARRIER:
3126 Select_MEMBARRIER(N: NodeToMatch);
3127 return;
3128 case ISD::STACKMAP:
3129 Select_STACKMAP(N: NodeToMatch);
3130 return;
3131 case ISD::PATCHPOINT:
3132 Select_PATCHPOINT(N: NodeToMatch);
3133 return;
3134 case ISD::JUMP_TABLE_DEBUG_INFO:
3135 Select_JUMP_TABLE_DEBUG_INFO(N: NodeToMatch);
3136 return;
3137 case ISD::CONVERGENCECTRL_ANCHOR:
3138 Select_CONVERGENCECTRL_ANCHOR(N: NodeToMatch);
3139 return;
3140 case ISD::CONVERGENCECTRL_ENTRY:
3141 Select_CONVERGENCECTRL_ENTRY(N: NodeToMatch);
3142 return;
3143 case ISD::CONVERGENCECTRL_LOOP:
3144 Select_CONVERGENCECTRL_LOOP(N: NodeToMatch);
3145 return;
3146 }
3147
3148 assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
3149
3150 // Set up the node stack with NodeToMatch as the only node on the stack.
3151 SmallVector<SDValue, 8> NodeStack;
3152 SDValue N = SDValue(NodeToMatch, 0);
3153 NodeStack.push_back(Elt: N);
3154
3155 // MatchScopes - Scopes used when matching, if a match failure happens, this
3156 // indicates where to continue checking.
3157 SmallVector<MatchScope, 8> MatchScopes;
3158
  // RecordedNodes - This is the set of nodes that have been recorded by the
  // state machine. The second value is the parent of the node, or null if the
  // recorded node is the root.
3162 SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
3163
  // MatchedMemRefs - This is the set of MemRefs we've seen in the input
  // pattern.
3166 SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
3167
3168 // These are the current input chain and glue for use when generating nodes.
3169 // Various Emit operations change these. For example, emitting a copytoreg
3170 // uses and updates these.
3171 SDValue InputChain, InputGlue;
3172
3173 // ChainNodesMatched - If a pattern matches nodes that have input/output
3174 // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
3175 // which ones they are. The result is captured into this list so that we can
3176 // update the chain results when the pattern is complete.
3177 SmallVector<SDNode*, 3> ChainNodesMatched;
3178
3179 LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
3180
3181 // Determine where to start the interpreter. Normally we start at opcode #0,
3182 // but if the state machine starts with an OPC_SwitchOpcode, then we
3183 // accelerate the first lookup (which is guaranteed to be hot) with the
3184 // OpcodeOffset table.
3185 unsigned MatcherIndex = 0;
3186
3187 if (!OpcodeOffset.empty()) {
3188 // Already computed the OpcodeOffset table, just index into it.
3189 if (N.getOpcode() < OpcodeOffset.size())
3190 MatcherIndex = OpcodeOffset[N.getOpcode()];
3191 LLVM_DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
3192
3193 } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3194 // Otherwise, the table isn't computed, but the state machine does start
3195 // with an OPC_SwitchOpcode instruction. Populate the table now, since this
3196 // is the first time we're selecting an instruction.
3197 unsigned Idx = 1;
3198 while (true) {
3199 // Get the size of this case.
3200 unsigned CaseSize = MatcherTable[Idx++];
3201 if (CaseSize & 128)
3202 CaseSize = GetVBR(Val: CaseSize, MatcherTable, Idx);
3203 if (CaseSize == 0) break;
3204
3205 // Get the opcode, add the index to the table.
3206 uint16_t Opc = MatcherTable[Idx++];
3207 Opc |= static_cast<uint16_t>(MatcherTable[Idx++]) << 8;
3208 if (Opc >= OpcodeOffset.size())
3209 OpcodeOffset.resize(new_size: (Opc+1)*2);
3210 OpcodeOffset[Opc] = Idx;
3211 Idx += CaseSize;
3212 }
3213
3214 // Okay, do the lookup for the first opcode.
3215 if (N.getOpcode() < OpcodeOffset.size())
3216 MatcherIndex = OpcodeOffset[N.getOpcode()];
3217 }
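  // Sketch of the layout the loop above walks (sizes and opcodes are
  // placeholders, not bytes from a real table):
  //   OPC_SwitchOpcode,
  //     CaseSize#1 (VBR), NodeOpc#1 (2 bytes), <case #1 matcher bytes...>,
  //     CaseSize#2 (VBR), NodeOpc#2 (2 bytes), <case #2 matcher bytes...>,
  //     ...,
  //     0                // A CaseSize of 0 terminates the switch.
  // After the loop, OpcodeOffset[Opc] points at the first byte of the
  // corresponding case body, so later selections of that opcode skip the
  // linear scan entirely.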
3218
3219 while (true) {
3220 assert(MatcherIndex < TableSize && "Invalid index");
3221#ifndef NDEBUG
3222 unsigned CurrentOpcodeIndex = MatcherIndex;
3223#endif
3224 BuiltinOpcodes Opcode =
3225 static_cast<BuiltinOpcodes>(MatcherTable[MatcherIndex++]);
3226 switch (Opcode) {
3227 case OPC_Scope: {
3228 // Okay, the semantics of this operation are that we should push a scope
3229 // then evaluate the first child. However, pushing a scope only to have
3230 // the first check fail (which then pops it) is inefficient. If we can
3231 // determine immediately that the first check (or first several) will
3232 // immediately fail, don't even bother pushing a scope for them.
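      // Sketch of the layout being walked here (byte values are placeholders,
      // not from a real table):
      //   OPC_Scope,
      //     NumToSkip#1 (VBR), <alternative #1 checks...>,
      //     NumToSkip#2 (VBR), <alternative #2 checks...>,
      //     ...,
      //     0            // A NumToSkip of 0 ends the scope.
      // If alternative #1 is known to fail up front, the interpreter jumps
      // straight to FailIndex = MatcherIndex + NumToSkip#1, i.e. to
      // alternative #2, without pushing a MatchScope for it.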
3233 unsigned FailIndex;
3234
3235 while (true) {
3236 unsigned NumToSkip = MatcherTable[MatcherIndex++];
3237 if (NumToSkip & 128)
3238 NumToSkip = GetVBR(Val: NumToSkip, MatcherTable, Idx&: MatcherIndex);
3239 // Found the end of the scope with no match.
3240 if (NumToSkip == 0) {
3241 FailIndex = 0;
3242 break;
3243 }
3244
3245 FailIndex = MatcherIndex+NumToSkip;
3246
3247 unsigned MatcherIndexOfPredicate = MatcherIndex;
3248 (void)MatcherIndexOfPredicate; // silence warning.
3249
3250 // If we can't evaluate this predicate without pushing a scope (e.g. if
3251 // it is a 'MoveParent') or if the predicate succeeds on this node, we
3252 // push the scope and evaluate the full predicate chain.
3253 bool Result;
3254 MatcherIndex = IsPredicateKnownToFail(Table: MatcherTable, Index: MatcherIndex, N,
3255 Result, SDISel: *this, RecordedNodes);
3256 if (!Result)
3257 break;
3258
3259 LLVM_DEBUG(
3260 dbgs() << " Skipped scope entry (due to false predicate) at "
3261 << "index " << MatcherIndexOfPredicate << ", continuing at "
3262 << FailIndex << "\n");
3263 ++NumDAGIselRetries;
3264
3265 // Otherwise, we know that this case of the Scope is guaranteed to fail,
3266 // move to the next case.
3267 MatcherIndex = FailIndex;
3268 }
3269
3270 // If the whole scope failed to match, bail.
3271 if (FailIndex == 0) break;
3272
3273 // Push a MatchScope which indicates where to go if the first child fails
3274 // to match.
3275 MatchScope NewEntry;
3276 NewEntry.FailIndex = FailIndex;
3277 NewEntry.NodeStack.append(in_start: NodeStack.begin(), in_end: NodeStack.end());
3278 NewEntry.NumRecordedNodes = RecordedNodes.size();
3279 NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3280 NewEntry.InputChain = InputChain;
3281 NewEntry.InputGlue = InputGlue;
3282 NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
3283 MatchScopes.push_back(Elt: NewEntry);
3284 continue;
3285 }
3286 case OPC_RecordNode: {
3287 // Remember this node, it may end up being an operand in the pattern.
3288 SDNode *Parent = nullptr;
3289 if (NodeStack.size() > 1)
3290 Parent = NodeStack[NodeStack.size()-2].getNode();
3291 RecordedNodes.push_back(Elt: std::make_pair(x&: N, y&: Parent));
3292 continue;
3293 }
3294
3295 case OPC_RecordChild0: case OPC_RecordChild1:
3296 case OPC_RecordChild2: case OPC_RecordChild3:
3297 case OPC_RecordChild4: case OPC_RecordChild5:
3298 case OPC_RecordChild6: case OPC_RecordChild7: {
3299 unsigned ChildNo = Opcode-OPC_RecordChild0;
3300 if (ChildNo >= N.getNumOperands())
3301 break; // Match fails if out of range child #.
3302
3303 RecordedNodes.push_back(Elt: std::make_pair(x: N->getOperand(Num: ChildNo),
3304 y: N.getNode()));
3305 continue;
3306 }
3307 case OPC_RecordMemRef:
3308 if (auto *MN = dyn_cast<MemSDNode>(Val&: N))
3309 MatchedMemRefs.push_back(Elt: MN->getMemOperand());
3310 else {
3311 LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
3312 dbgs() << '\n');
3313 }
3314
3315 continue;
3316
3317 case OPC_CaptureGlueInput:
3318 // If the current node has an input glue, capture it in InputGlue.
3319 if (N->getNumOperands() != 0 &&
3320 N->getOperand(Num: N->getNumOperands()-1).getValueType() == MVT::Glue)
3321 InputGlue = N->getOperand(Num: N->getNumOperands()-1);
3322 continue;
3323
3324 case OPC_MoveChild: {
3325 unsigned ChildNo = MatcherTable[MatcherIndex++];
3326 if (ChildNo >= N.getNumOperands())
3327 break; // Match fails if out of range child #.
3328 N = N.getOperand(i: ChildNo);
3329 NodeStack.push_back(Elt: N);
3330 continue;
3331 }
3332
3333 case OPC_MoveChild0: case OPC_MoveChild1:
3334 case OPC_MoveChild2: case OPC_MoveChild3:
3335 case OPC_MoveChild4: case OPC_MoveChild5:
3336 case OPC_MoveChild6: case OPC_MoveChild7: {
3337 unsigned ChildNo = Opcode-OPC_MoveChild0;
3338 if (ChildNo >= N.getNumOperands())
3339 break; // Match fails if out of range child #.
3340 N = N.getOperand(i: ChildNo);
3341 NodeStack.push_back(Elt: N);
3342 continue;
3343 }
3344
3345 case OPC_MoveSibling:
3346 case OPC_MoveSibling0:
3347 case OPC_MoveSibling1:
3348 case OPC_MoveSibling2:
3349 case OPC_MoveSibling3:
3350 case OPC_MoveSibling4:
3351 case OPC_MoveSibling5:
3352 case OPC_MoveSibling6:
3353 case OPC_MoveSibling7: {
3354 // Pop the current node off the NodeStack.
3355 NodeStack.pop_back();
3356 assert(!NodeStack.empty() && "Node stack imbalance!");
3357 N = NodeStack.back();
3358
3359 unsigned SiblingNo = Opcode == OPC_MoveSibling
3360 ? MatcherTable[MatcherIndex++]
3361 : Opcode - OPC_MoveSibling0;
3362 if (SiblingNo >= N.getNumOperands())
3363 break; // Match fails if out of range sibling #.
3364 N = N.getOperand(i: SiblingNo);
3365 NodeStack.push_back(Elt: N);
3366 continue;
3367 }
3368 case OPC_MoveParent:
3369 // Pop the current node off the NodeStack.
3370 NodeStack.pop_back();
3371 assert(!NodeStack.empty() && "Node stack imbalance!");
3372 N = NodeStack.back();
3373 continue;
3374
3375 case OPC_CheckSame:
3376 if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3377 continue;
3378
3379 case OPC_CheckChild0Same: case OPC_CheckChild1Same:
3380 case OPC_CheckChild2Same: case OPC_CheckChild3Same:
3381 if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
3382 ChildNo: Opcode-OPC_CheckChild0Same))
3383 break;
3384 continue;
3385
3386 case OPC_CheckPatternPredicate:
3387 case OPC_CheckPatternPredicate0:
3388 case OPC_CheckPatternPredicate1:
3389 case OPC_CheckPatternPredicate2:
3390 case OPC_CheckPatternPredicate3:
3391 case OPC_CheckPatternPredicate4:
3392 case OPC_CheckPatternPredicate5:
3393 case OPC_CheckPatternPredicate6:
3394 case OPC_CheckPatternPredicate7:
3395 case OPC_CheckPatternPredicateTwoByte:
3396 if (!::CheckPatternPredicate(Opcode, MatcherTable, MatcherIndex, SDISel: *this))
3397 break;
3398 continue;
3399 case SelectionDAGISel::OPC_CheckPredicate0:
3400 case SelectionDAGISel::OPC_CheckPredicate1:
3401 case SelectionDAGISel::OPC_CheckPredicate2:
3402 case SelectionDAGISel::OPC_CheckPredicate3:
3403 case SelectionDAGISel::OPC_CheckPredicate4:
3404 case SelectionDAGISel::OPC_CheckPredicate5:
3405 case SelectionDAGISel::OPC_CheckPredicate6:
3406 case SelectionDAGISel::OPC_CheckPredicate7:
3407 case OPC_CheckPredicate:
3408 if (!::CheckNodePredicate(Opcode, MatcherTable, MatcherIndex, SDISel: *this,
3409 N: N.getNode()))
3410 break;
3411 continue;
3412 case OPC_CheckPredicateWithOperands: {
3413 unsigned OpNum = MatcherTable[MatcherIndex++];
3414 SmallVector<SDValue, 8> Operands;
3415
3416 for (unsigned i = 0; i < OpNum; ++i)
3417 Operands.push_back(Elt: RecordedNodes[MatcherTable[MatcherIndex++]].first);
3418
3419 unsigned PredNo = MatcherTable[MatcherIndex++];
3420 if (!CheckNodePredicateWithOperands(N: N.getNode(), PredNo, Operands))
3421 break;
3422 continue;
3423 }
3424 case OPC_CheckComplexPat:
3425 case OPC_CheckComplexPat0:
3426 case OPC_CheckComplexPat1:
3427 case OPC_CheckComplexPat2:
3428 case OPC_CheckComplexPat3:
3429 case OPC_CheckComplexPat4:
3430 case OPC_CheckComplexPat5:
3431 case OPC_CheckComplexPat6:
3432 case OPC_CheckComplexPat7: {
3433 unsigned CPNum = Opcode == OPC_CheckComplexPat
3434 ? MatcherTable[MatcherIndex++]
3435 : Opcode - OPC_CheckComplexPat0;
3436 unsigned RecNo = MatcherTable[MatcherIndex++];
3437 assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3438
      // If the target can modify the DAG during matching, keep the matching
      // state consistent.
3441 std::unique_ptr<MatchStateUpdater> MSU;
3442 if (ComplexPatternFuncMutatesDAG())
3443 MSU.reset(p: new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
3444 MatchScopes));
3445
3446 if (!CheckComplexPattern(Root: NodeToMatch, Parent: RecordedNodes[RecNo].second,
3447 N: RecordedNodes[RecNo].first, PatternNo: CPNum,
3448 Result&: RecordedNodes))
3449 break;
3450 continue;
3451 }
3452 case OPC_CheckOpcode:
3453 if (!::CheckOpcode(MatcherTable, MatcherIndex, N: N.getNode())) break;
3454 continue;
3455
3456 case OPC_CheckType:
3457 case OPC_CheckTypeI32:
3458 case OPC_CheckTypeI64:
3459 MVT::SimpleValueType VT;
3460 switch (Opcode) {
3461 case OPC_CheckTypeI32:
3462 VT = MVT::i32;
3463 break;
3464 case OPC_CheckTypeI64:
3465 VT = MVT::i64;
3466 break;
3467 default:
3468 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3469 break;
3470 }
3471 if (!::CheckType(VT, N, TLI, DL: CurDAG->getDataLayout()))
3472 break;
3473 continue;
3474
3475 case OPC_CheckTypeRes: {
3476 unsigned Res = MatcherTable[MatcherIndex++];
3477 if (!::CheckType(
3478 VT: static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]),
3479 N: N.getValue(R: Res), TLI, DL: CurDAG->getDataLayout()))
3480 break;
3481 continue;
3482 }
3483
3484 case OPC_SwitchOpcode: {
3485 unsigned CurNodeOpcode = N.getOpcode();
3486 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3487 unsigned CaseSize;
3488 while (true) {
3489 // Get the size of this case.
3490 CaseSize = MatcherTable[MatcherIndex++];
3491 if (CaseSize & 128)
3492 CaseSize = GetVBR(Val: CaseSize, MatcherTable, Idx&: MatcherIndex);
3493 if (CaseSize == 0) break;
3494
3495 uint16_t Opc = MatcherTable[MatcherIndex++];
3496 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
3497
3498 // If the opcode matches, then we will execute this case.
3499 if (CurNodeOpcode == Opc)
3500 break;
3501
3502 // Otherwise, skip over this case.
3503 MatcherIndex += CaseSize;
3504 }
3505
3506 // If no cases matched, bail out.
3507 if (CaseSize == 0) break;
3508
3509 // Otherwise, execute the case we found.
3510 LLVM_DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to "
3511 << MatcherIndex << "\n");
3512 continue;
3513 }
3514
3515 case OPC_SwitchType: {
3516 MVT CurNodeVT = N.getSimpleValueType();
3517 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3518 unsigned CaseSize;
3519 while (true) {
3520 // Get the size of this case.
3521 CaseSize = MatcherTable[MatcherIndex++];
3522 if (CaseSize & 128)
3523 CaseSize = GetVBR(Val: CaseSize, MatcherTable, Idx&: MatcherIndex);
3524 if (CaseSize == 0) break;
3525
3526 MVT CaseVT =
3527 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3528 if (CaseVT == MVT::iPTR)
3529 CaseVT = TLI->getPointerTy(DL: CurDAG->getDataLayout());
3530
3531 // If the VT matches, then we will execute this case.
3532 if (CurNodeVT == CaseVT)
3533 break;
3534
3535 // Otherwise, skip over this case.
3536 MatcherIndex += CaseSize;
3537 }
3538
3539 // If no cases matched, bail out.
3540 if (CaseSize == 0) break;
3541
3542 // Otherwise, execute the case we found.
3543 LLVM_DEBUG(dbgs() << " TypeSwitch[" << CurNodeVT
3544 << "] from " << SwitchStart << " to " << MatcherIndex
3545 << '\n');
3546 continue;
3547 }
3548 case OPC_CheckChild0Type:
3549 case OPC_CheckChild1Type:
3550 case OPC_CheckChild2Type:
3551 case OPC_CheckChild3Type:
3552 case OPC_CheckChild4Type:
3553 case OPC_CheckChild5Type:
3554 case OPC_CheckChild6Type:
3555 case OPC_CheckChild7Type:
3556 case OPC_CheckChild0TypeI32:
3557 case OPC_CheckChild1TypeI32:
3558 case OPC_CheckChild2TypeI32:
3559 case OPC_CheckChild3TypeI32:
3560 case OPC_CheckChild4TypeI32:
3561 case OPC_CheckChild5TypeI32:
3562 case OPC_CheckChild6TypeI32:
3563 case OPC_CheckChild7TypeI32:
3564 case OPC_CheckChild0TypeI64:
3565 case OPC_CheckChild1TypeI64:
3566 case OPC_CheckChild2TypeI64:
3567 case OPC_CheckChild3TypeI64:
3568 case OPC_CheckChild4TypeI64:
3569 case OPC_CheckChild5TypeI64:
3570 case OPC_CheckChild6TypeI64:
3571 case OPC_CheckChild7TypeI64: {
3572 MVT::SimpleValueType VT;
3573 unsigned ChildNo;
3574 if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
3575 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
3576 VT = MVT::i32;
3577 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
3578 } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
3579 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
3580 VT = MVT::i64;
3581 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
3582 } else {
3583 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3584 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
3585 }
3586 if (!::CheckChildType(VT, N, TLI, DL: CurDAG->getDataLayout(), ChildNo))
3587 break;
3588 continue;
3589 }
3590 case OPC_CheckCondCode:
3591 if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3592 continue;
3593 case OPC_CheckChild2CondCode:
3594 if (!::CheckChild2CondCode(MatcherTable, MatcherIndex, N)) break;
3595 continue;
3596 case OPC_CheckValueType:
3597 if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
3598 DL: CurDAG->getDataLayout()))
3599 break;
3600 continue;
3601 case OPC_CheckInteger:
3602 if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3603 continue;
3604 case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
3605 case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
3606 case OPC_CheckChild4Integer:
3607 if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
3608 ChildNo: Opcode-OPC_CheckChild0Integer)) break;
3609 continue;
3610 case OPC_CheckAndImm:
3611 if (!::CheckAndImm(MatcherTable, MatcherIndex, N, SDISel: *this)) break;
3612 continue;
3613 case OPC_CheckOrImm:
3614 if (!::CheckOrImm(MatcherTable, MatcherIndex, N, SDISel: *this)) break;
3615 continue;
3616 case OPC_CheckImmAllOnesV:
3617 if (!ISD::isConstantSplatVectorAllOnes(N: N.getNode()))
3618 break;
3619 continue;
3620 case OPC_CheckImmAllZerosV:
3621 if (!ISD::isConstantSplatVectorAllZeros(N: N.getNode()))
3622 break;
3623 continue;
3624
3625 case OPC_CheckFoldableChainNode: {
3626 assert(NodeStack.size() != 1 && "No parent node");
3627 // Verify that all intermediate nodes between the root and this one have
3628 // a single use (ignoring chains, which are handled in UpdateChains).
3629 bool HasMultipleUses = false;
3630 for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i) {
3631 unsigned NNonChainUses = 0;
3632 SDNode *NS = NodeStack[i].getNode();
3633 for (auto UI = NS->use_begin(), UE = NS->use_end(); UI != UE; ++UI)
3634 if (UI.getUse().getValueType() != MVT::Other)
3635 if (++NNonChainUses > 1) {
3636 HasMultipleUses = true;
3637 break;
3638 }
3639 if (HasMultipleUses) break;
3640 }
3641 if (HasMultipleUses) break;
3642
3643 // Check to see that the target thinks this is profitable to fold and that
3644 // we can fold it without inducing cycles in the graph.
3645 if (!IsProfitableToFold(N, U: NodeStack[NodeStack.size()-2].getNode(),
3646 Root: NodeToMatch) ||
3647 !IsLegalToFold(N, U: NodeStack[NodeStack.size()-2].getNode(),
3648 Root: NodeToMatch, OptLevel,
3649 IgnoreChains: true/*We validate our own chains*/))
3650 break;
3651
3652 continue;
3653 }
3654 case OPC_EmitInteger:
3655 case OPC_EmitInteger8:
3656 case OPC_EmitInteger16:
3657 case OPC_EmitInteger32:
3658 case OPC_EmitInteger64:
3659 case OPC_EmitStringInteger:
3660 case OPC_EmitStringInteger32: {
3661 MVT::SimpleValueType VT;
3662 switch (Opcode) {
3663 case OPC_EmitInteger8:
3664 VT = MVT::i8;
3665 break;
3666 case OPC_EmitInteger16:
3667 VT = MVT::i16;
3668 break;
3669 case OPC_EmitInteger32:
3670 case OPC_EmitStringInteger32:
3671 VT = MVT::i32;
3672 break;
3673 case OPC_EmitInteger64:
3674 VT = MVT::i64;
3675 break;
3676 default:
3677 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3678 break;
3679 }
3680 int64_t Val = MatcherTable[MatcherIndex++];
3681 if (Val & 128)
3682 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
3683 if (Opcode >= OPC_EmitInteger && Opcode <= OPC_EmitInteger64)
3684 Val = decodeSignRotatedValue(V: Val);
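      // The sign-rotated encoding keeps small negative immediates compact in
      // the VBR stream: roughly, even encoded values decode to Val >> 1 and
      // odd ones to -(Val >> 1), with INT64_MIN special-cased. For
      // illustration (encodings chosen arbitrarily), an encoded 6 decodes to
      // 3 and an encoded 7 decodes to -3.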
3685 RecordedNodes.push_back(Elt: std::pair<SDValue, SDNode *>(
3686 CurDAG->getTargetConstant(Val, DL: SDLoc(NodeToMatch), VT), nullptr));
3687 continue;
3688 }
3689 case OPC_EmitRegister:
3690 case OPC_EmitRegisterI32:
3691 case OPC_EmitRegisterI64: {
3692 MVT::SimpleValueType VT;
3693 switch (Opcode) {
3694 case OPC_EmitRegisterI32:
3695 VT = MVT::i32;
3696 break;
3697 case OPC_EmitRegisterI64:
3698 VT = MVT::i64;
3699 break;
3700 default:
3701 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3702 break;
3703 }
3704 unsigned RegNo = MatcherTable[MatcherIndex++];
3705 RecordedNodes.push_back(Elt: std::pair<SDValue, SDNode *>(
3706 CurDAG->getRegister(Reg: RegNo, VT), nullptr));
3707 continue;
3708 }
3709 case OPC_EmitRegister2: {
3710 // For targets w/ more than 256 register names, the register enum
3711 // values are stored in two bytes in the matcher table (just like
3712 // opcodes).
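      // For example (a made-up encoding, not a real register number), the
      // byte pair {0x34, 0x12} decodes to RegNo 0x1234: the first byte is the
      // low half and the second byte is shifted up by 8 below.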
3713 MVT::SimpleValueType VT =
3714 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3715 unsigned RegNo = MatcherTable[MatcherIndex++];
3716 RegNo |= MatcherTable[MatcherIndex++] << 8;
3717 RecordedNodes.push_back(Elt: std::pair<SDValue, SDNode*>(
3718 CurDAG->getRegister(Reg: RegNo, VT), nullptr));
3719 continue;
3720 }
3721
3722 case OPC_EmitConvertToTarget:
3723 case OPC_EmitConvertToTarget0:
3724 case OPC_EmitConvertToTarget1:
3725 case OPC_EmitConvertToTarget2:
3726 case OPC_EmitConvertToTarget3:
3727 case OPC_EmitConvertToTarget4:
3728 case OPC_EmitConvertToTarget5:
3729 case OPC_EmitConvertToTarget6:
3730 case OPC_EmitConvertToTarget7: {
3731 // Convert from IMM/FPIMM to target version.
3732 unsigned RecNo = Opcode == OPC_EmitConvertToTarget
3733 ? MatcherTable[MatcherIndex++]
3734 : Opcode - OPC_EmitConvertToTarget0;
3735 assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3736 SDValue Imm = RecordedNodes[RecNo].first;
3737
3738 if (Imm->getOpcode() == ISD::Constant) {
3739 const ConstantInt *Val=cast<ConstantSDNode>(Val&: Imm)->getConstantIntValue();
3740 Imm = CurDAG->getTargetConstant(Val: *Val, DL: SDLoc(NodeToMatch),
3741 VT: Imm.getValueType());
3742 } else if (Imm->getOpcode() == ISD::ConstantFP) {
3743 const ConstantFP *Val=cast<ConstantFPSDNode>(Val&: Imm)->getConstantFPValue();
3744 Imm = CurDAG->getTargetConstantFP(Val: *Val, DL: SDLoc(NodeToMatch),
3745 VT: Imm.getValueType());
3746 }
3747
3748 RecordedNodes.push_back(Elt: std::make_pair(x&: Imm, y&: RecordedNodes[RecNo].second));
3749 continue;
3750 }
3751
3752 case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
3753 case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
3754 case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
3755 // These are space-optimized forms of OPC_EmitMergeInputChains.
3756 assert(!InputChain.getNode() &&
3757 "EmitMergeInputChains should be the first chain producing node");
3758 assert(ChainNodesMatched.empty() &&
3759 "Should only have one EmitMergeInputChains per match");
3760
3761 // Read all of the chained nodes.
3762 unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3763 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3764 ChainNodesMatched.push_back(Elt: RecordedNodes[RecNo].first.getNode());
3765
3766 // If the chained node is not the root, we can't fold it if it has
3767 // multiple uses.
3768 // FIXME: What if other value results of the node have uses not matched
3769 // by this pattern?
3770 if (ChainNodesMatched.back() != NodeToMatch &&
3771 !RecordedNodes[RecNo].first.hasOneUse()) {
3772 ChainNodesMatched.clear();
3773 break;
3774 }
3775
3776 // Merge the input chains if they are not intra-pattern references.
3777 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3778
3779 if (!InputChain.getNode())
3780 break; // Failed to merge.
3781 continue;
3782 }
3783
3784 case OPC_EmitMergeInputChains: {
3785 assert(!InputChain.getNode() &&
3786 "EmitMergeInputChains should be the first chain producing node");
3787 // This node gets a list of nodes we matched in the input that have
3788 // chains. We want to token factor all of the input chains to these nodes
3789 // together. However, if any of the input chains is actually one of the
3790 // nodes matched in this pattern, then we have an intra-match reference.
3791 // Ignore these because the newly token factored chain should not refer to
3792 // the old nodes.
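      // As an illustrative case (not tied to any particular target pattern):
      // when a pattern folds a load into another chained node, e.g.
      // (store (add (load p), x), q), both the load and the store are
      // recorded here, and their incoming chains are token-factored into a
      // single InputChain for the node being emitted; the load's own chain
      // result is an intra-match reference and is deliberately not made an
      // operand of that token factor.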
3793 unsigned NumChains = MatcherTable[MatcherIndex++];
3794 assert(NumChains != 0 && "Can't TF zero chains");
3795
3796 assert(ChainNodesMatched.empty() &&
3797 "Should only have one EmitMergeInputChains per match");
3798
3799 // Read all of the chained nodes.
3800 for (unsigned i = 0; i != NumChains; ++i) {
3801 unsigned RecNo = MatcherTable[MatcherIndex++];
3802 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3803 ChainNodesMatched.push_back(Elt: RecordedNodes[RecNo].first.getNode());
3804
3805 // If the chained node is not the root, we can't fold it if it has
3806 // multiple uses.
3807 // FIXME: What if other value results of the node have uses not matched
3808 // by this pattern?
3809 if (ChainNodesMatched.back() != NodeToMatch &&
3810 !RecordedNodes[RecNo].first.hasOneUse()) {
3811 ChainNodesMatched.clear();
3812 break;
3813 }
3814 }
3815
3816 // If the inner loop broke out, the match fails.
3817 if (ChainNodesMatched.empty())
3818 break;
3819
3820 // Merge the input chains if they are not intra-pattern references.
3821 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3822
3823 if (!InputChain.getNode())
3824 break; // Failed to merge.
3825
3826 continue;
3827 }
3828
3829 case OPC_EmitCopyToReg:
3830 case OPC_EmitCopyToReg0:
3831 case OPC_EmitCopyToReg1:
3832 case OPC_EmitCopyToReg2:
3833 case OPC_EmitCopyToReg3:
3834 case OPC_EmitCopyToReg4:
3835 case OPC_EmitCopyToReg5:
3836 case OPC_EmitCopyToReg6:
3837 case OPC_EmitCopyToReg7:
3838 case OPC_EmitCopyToRegTwoByte: {
3839 unsigned RecNo =
3840 Opcode >= OPC_EmitCopyToReg0 && Opcode <= OPC_EmitCopyToReg7
3841 ? Opcode - OPC_EmitCopyToReg0
3842 : MatcherTable[MatcherIndex++];
3843 assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
3844 unsigned DestPhysReg = MatcherTable[MatcherIndex++];
3845 if (Opcode == OPC_EmitCopyToRegTwoByte)
3846 DestPhysReg |= MatcherTable[MatcherIndex++] << 8;
3847
3848 if (!InputChain.getNode())
3849 InputChain = CurDAG->getEntryNode();
3850
3851 InputChain = CurDAG->getCopyToReg(Chain: InputChain, dl: SDLoc(NodeToMatch),
3852 Reg: DestPhysReg, N: RecordedNodes[RecNo].first,
3853 Glue: InputGlue);
3854
3855 InputGlue = InputChain.getValue(R: 1);
3856 continue;
3857 }
3858
3859 case OPC_EmitNodeXForm: {
3860 unsigned XFormNo = MatcherTable[MatcherIndex++];
3861 unsigned RecNo = MatcherTable[MatcherIndex++];
3862 assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
3863 SDValue Res = RunSDNodeXForm(V: RecordedNodes[RecNo].first, XFormNo);
3864 RecordedNodes.push_back(Elt: std::pair<SDValue,SDNode*>(Res, nullptr));
3865 continue;
3866 }
3867 case OPC_Coverage: {
      // This is emitted right before MorphNode/EmitNode, so it should be safe
      // to assume that this node has been selected.
3870 unsigned index = MatcherTable[MatcherIndex++];
3871 index |= (MatcherTable[MatcherIndex++] << 8);
3872 dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3873 dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3874 continue;
3875 }
3876
3877 case OPC_EmitNode:
3878 case OPC_EmitNode0:
3879 case OPC_EmitNode1:
3880 case OPC_EmitNode2:
3881 case OPC_EmitNode0None:
3882 case OPC_EmitNode1None:
3883 case OPC_EmitNode2None:
3884 case OPC_EmitNode0Chain:
3885 case OPC_EmitNode1Chain:
3886 case OPC_EmitNode2Chain:
3887 case OPC_MorphNodeTo:
3888 case OPC_MorphNodeTo0:
3889 case OPC_MorphNodeTo1:
3890 case OPC_MorphNodeTo2:
3891 case OPC_MorphNodeTo0None:
3892 case OPC_MorphNodeTo1None:
3893 case OPC_MorphNodeTo2None:
3894 case OPC_MorphNodeTo0Chain:
3895 case OPC_MorphNodeTo1Chain:
3896 case OPC_MorphNodeTo2Chain:
3897 case OPC_MorphNodeTo0GlueInput:
3898 case OPC_MorphNodeTo1GlueInput:
3899 case OPC_MorphNodeTo2GlueInput:
3900 case OPC_MorphNodeTo0GlueOutput:
3901 case OPC_MorphNodeTo1GlueOutput:
3902 case OPC_MorphNodeTo2GlueOutput: {
3903 uint16_t TargetOpc = MatcherTable[MatcherIndex++];
3904 TargetOpc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
3905 unsigned EmitNodeInfo;
3906 if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2Chain) {
3907 if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
3908 EmitNodeInfo = OPFL_Chain;
3909 else
3910 EmitNodeInfo = OPFL_None;
3911 } else if (Opcode >= OPC_MorphNodeTo0None &&
3912 Opcode <= OPC_MorphNodeTo2GlueOutput) {
3913 if (Opcode >= OPC_MorphNodeTo0Chain && Opcode <= OPC_MorphNodeTo2Chain)
3914 EmitNodeInfo = OPFL_Chain;
3915 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
3916 Opcode <= OPC_MorphNodeTo2GlueInput)
3917 EmitNodeInfo = OPFL_GlueInput;
3918 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
3919 Opcode <= OPC_MorphNodeTo2GlueOutput)
3920 EmitNodeInfo = OPFL_GlueOutput;
3921 else
3922 EmitNodeInfo = OPFL_None;
3923 } else
3924 EmitNodeInfo = MatcherTable[MatcherIndex++];
3925 // Get the result VT list.
3926 unsigned NumVTs;
3927 // If this is one of the compressed forms, get the number of VTs based
3928 // on the Opcode. Otherwise read the next byte from the table.
3929 if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
3930 NumVTs = Opcode - OPC_MorphNodeTo0;
3931 else if (Opcode >= OPC_MorphNodeTo0None && Opcode <= OPC_MorphNodeTo2None)
3932 NumVTs = Opcode - OPC_MorphNodeTo0None;
3933 else if (Opcode >= OPC_MorphNodeTo0Chain &&
3934 Opcode <= OPC_MorphNodeTo2Chain)
3935 NumVTs = Opcode - OPC_MorphNodeTo0Chain;
3936 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
3937 Opcode <= OPC_MorphNodeTo2GlueInput)
3938 NumVTs = Opcode - OPC_MorphNodeTo0GlueInput;
3939 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
3940 Opcode <= OPC_MorphNodeTo2GlueOutput)
3941 NumVTs = Opcode - OPC_MorphNodeTo0GlueOutput;
3942 else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
3943 NumVTs = Opcode - OPC_EmitNode0;
3944 else if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2None)
3945 NumVTs = Opcode - OPC_EmitNode0None;
3946 else if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
3947 NumVTs = Opcode - OPC_EmitNode0Chain;
3948 else
3949 NumVTs = MatcherTable[MatcherIndex++];
3950 SmallVector<EVT, 4> VTs;
3951 for (unsigned i = 0; i != NumVTs; ++i) {
3952 MVT::SimpleValueType VT =
3953 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3954 if (VT == MVT::iPTR)
3955 VT = TLI->getPointerTy(DL: CurDAG->getDataLayout()).SimpleTy;
3956 VTs.push_back(Elt: VT);
3957 }
3958
3959 if (EmitNodeInfo & OPFL_Chain)
3960 VTs.push_back(MVT::Other);
3961 if (EmitNodeInfo & OPFL_GlueOutput)
3962 VTs.push_back(MVT::Glue);
3963
3964 // This is hot code, so optimize the two most common cases of 1 and 2
3965 // results.
3966 SDVTList VTList;
3967 if (VTs.size() == 1)
3968 VTList = CurDAG->getVTList(VT: VTs[0]);
3969 else if (VTs.size() == 2)
3970 VTList = CurDAG->getVTList(VT1: VTs[0], VT2: VTs[1]);
3971 else
3972 VTList = CurDAG->getVTList(VTs);
3973
3974 // Get the operand list.
3975 unsigned NumOps = MatcherTable[MatcherIndex++];
3976 SmallVector<SDValue, 8> Ops;
3977 for (unsigned i = 0; i != NumOps; ++i) {
3978 unsigned RecNo = MatcherTable[MatcherIndex++];
3979 if (RecNo & 128)
3980 RecNo = GetVBR(Val: RecNo, MatcherTable, Idx&: MatcherIndex);
3981
3982 assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
3983 Ops.push_back(Elt: RecordedNodes[RecNo].first);
3984 }
3985
3986 // If there are variadic operands to add, handle them now.
3987 if (EmitNodeInfo & OPFL_VariadicInfo) {
3988 // Determine the start index to copy from.
3989 unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(Flags: EmitNodeInfo);
3990 FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
3991 assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
3992 "Invalid variadic node");
3993 // Copy all of the variadic operands, not including a potential glue
3994 // input.
3995 for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
3996 i != e; ++i) {
3997 SDValue V = NodeToMatch->getOperand(Num: i);
3998 if (V.getValueType() == MVT::Glue) break;
3999 Ops.push_back(Elt: V);
4000 }
4001 }
4002
4003 // If this has chain/glue inputs, add them.
4004 if (EmitNodeInfo & OPFL_Chain)
4005 Ops.push_back(Elt: InputChain);
4006 if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
4007 Ops.push_back(Elt: InputGlue);
4008
4009 // Check whether any matched node could raise an FP exception. Since all
4010 // such nodes must have a chain, it suffices to check ChainNodesMatched.
4011 // We need to perform this check before potentially modifying one of the
4012 // nodes via MorphNode.
4013 bool MayRaiseFPException =
4014 llvm::any_of(Range&: ChainNodesMatched, P: [this](SDNode *N) {
4015 return mayRaiseFPException(Node: N) && !N->getFlags().hasNoFPExcept();
4016 });
4017
4018 // Create the node.
4019 MachineSDNode *Res = nullptr;
4020 bool IsMorphNodeTo =
4021 Opcode == OPC_MorphNodeTo ||
4022 (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2GlueOutput);
4023 if (!IsMorphNodeTo) {
4024 // If this is a normal EmitNode command, just create the new node and
4025 // add the results to the RecordedNodes list.
4026 Res = CurDAG->getMachineNode(Opcode: TargetOpc, dl: SDLoc(NodeToMatch),
4027 VTs: VTList, Ops);
4028
4029 // Add all the non-glue/non-chain results to the RecordedNodes list.
4030 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
4031 if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
4032 RecordedNodes.push_back(Elt: std::pair<SDValue,SDNode*>(SDValue(Res, i),
4033 nullptr));
4034 }
4035 } else {
4036 assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
4037 "NodeToMatch was removed partway through selection");
4038 SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
4039 SDNode *E) {
4040 CurDAG->salvageDebugInfo(N&: *N);
4041 auto &Chain = ChainNodesMatched;
4042 assert((!E || !is_contained(Chain, N)) &&
4043 "Chain node replaced during MorphNode");
4044 llvm::erase(C&: Chain, V: N);
4045 });
4046 Res = cast<MachineSDNode>(Val: MorphNode(Node: NodeToMatch, TargetOpc, VTList,
4047 Ops, EmitNodeInfo));
4048 }
4049
4050 // Set the NoFPExcept flag when no original matched node could
4051 // raise an FP exception, but the new node potentially might.
4052 if (!MayRaiseFPException && mayRaiseFPException(Node: Res)) {
4053 SDNodeFlags Flags = Res->getFlags();
4054 Flags.setNoFPExcept(true);
4055 Res->setFlags(Flags);
4056 }
4057
4058 // If the node had chain/glue results, update our notion of the current
4059 // chain and glue.
4060 if (EmitNodeInfo & OPFL_GlueOutput) {
4061 InputGlue = SDValue(Res, VTs.size()-1);
4062 if (EmitNodeInfo & OPFL_Chain)
4063 InputChain = SDValue(Res, VTs.size()-2);
4064 } else if (EmitNodeInfo & OPFL_Chain)
4065 InputChain = SDValue(Res, VTs.size()-1);
4066
4067 // If the OPFL_MemRefs glue is set on this node, slap all of the
4068 // accumulated memrefs onto it.
4069 //
      // FIXME: This is vastly incorrect for patterns with multiple output
      // instructions that access memory and for ComplexPatterns that match
      // loads.
4073 if (EmitNodeInfo & OPFL_MemRefs) {
4074 // Only attach load or store memory operands if the generated
4075 // instruction may load or store.
4076 const MCInstrDesc &MCID = TII->get(Opcode: TargetOpc);
4077 bool mayLoad = MCID.mayLoad();
4078 bool mayStore = MCID.mayStore();
4079
4080 // We expect to have relatively few of these so just filter them into a
4081 // temporary buffer so that we can easily add them to the instruction.
4082 SmallVector<MachineMemOperand *, 4> FilteredMemRefs;
4083 for (MachineMemOperand *MMO : MatchedMemRefs) {
4084 if (MMO->isLoad()) {
4085 if (mayLoad)
4086 FilteredMemRefs.push_back(Elt: MMO);
4087 } else if (MMO->isStore()) {
4088 if (mayStore)
4089 FilteredMemRefs.push_back(Elt: MMO);
4090 } else {
4091 FilteredMemRefs.push_back(Elt: MMO);
4092 }
4093 }
4094
4095 CurDAG->setNodeMemRefs(N: Res, NewMemRefs: FilteredMemRefs);
4096 }
4097
4098 LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
4099 << " Dropping mem operands\n";
4100 dbgs() << " " << (IsMorphNodeTo ? "Morphed" : "Created")
4101 << " node: ";
4102 Res->dump(CurDAG););
4103
4104 // If this was a MorphNodeTo then we're completely done!
4105 if (IsMorphNodeTo) {
4106 // Update chain uses.
4107 UpdateChains(NodeToMatch: Res, InputChain, ChainNodesMatched, isMorphNodeTo: true);
4108 return;
4109 }
4110 continue;
4111 }
4112
4113 case OPC_CompleteMatch: {
4114 // The match has been completed, and any new nodes (if any) have been
4115 // created. Patch up references to the matched dag to use the newly
4116 // created nodes.
4117 unsigned NumResults = MatcherTable[MatcherIndex++];
4118
4119 for (unsigned i = 0; i != NumResults; ++i) {
4120 unsigned ResSlot = MatcherTable[MatcherIndex++];
4121 if (ResSlot & 128)
4122 ResSlot = GetVBR(Val: ResSlot, MatcherTable, Idx&: MatcherIndex);
4123
4124 assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
4125 SDValue Res = RecordedNodes[ResSlot].first;
4126
4127 assert(i < NodeToMatch->getNumValues() &&
4128 NodeToMatch->getValueType(i) != MVT::Other &&
4129 NodeToMatch->getValueType(i) != MVT::Glue &&
4130 "Invalid number of results to complete!");
4131 assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
4132 NodeToMatch->getValueType(i) == MVT::iPTR ||
4133 Res.getValueType() == MVT::iPTR ||
4134 NodeToMatch->getValueType(i).getSizeInBits() ==
4135 Res.getValueSizeInBits()) &&
4136 "invalid replacement");
4137 ReplaceUses(F: SDValue(NodeToMatch, i), T: Res);
4138 }
4139
4140 // Update chain uses.
4141 UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, isMorphNodeTo: false);
4142
4143 // If the root node defines glue, we need to update it to the glue result.
      // TODO: This never happens in our tests and I think it can be removed /
      // replaced with an assert, but doing it this way keeps the change NFC.
4147 if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
4148 MVT::Glue &&
4149 InputGlue.getNode())
4150 ReplaceUses(F: SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
4151 T: InputGlue);
4152
4153 assert(NodeToMatch->use_empty() &&
4154 "Didn't replace all uses of the node?");
4155 CurDAG->RemoveDeadNode(N: NodeToMatch);
4156
4157 return;
4158 }
4159 }
4160
4161 // If the code reached this point, then the match failed. See if there is
4162 // another child to try in the current 'Scope', otherwise pop it until we
4163 // find a case to check.
4164 LLVM_DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex
4165 << "\n");
4166 ++NumDAGIselRetries;
4167 while (true) {
4168 if (MatchScopes.empty()) {
4169 CannotYetSelect(N: NodeToMatch);
4170 return;
4171 }
4172
4173 // Restore the interpreter state back to the point where the scope was
4174 // formed.
4175 MatchScope &LastScope = MatchScopes.back();
4176 RecordedNodes.resize(N: LastScope.NumRecordedNodes);
4177 NodeStack.clear();
4178 NodeStack.append(in_start: LastScope.NodeStack.begin(), in_end: LastScope.NodeStack.end());
4179 N = NodeStack.back();
4180
4181 if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
4182 MatchedMemRefs.resize(N: LastScope.NumMatchedMemRefs);
4183 MatcherIndex = LastScope.FailIndex;
4184
4185 LLVM_DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
4186
4187 InputChain = LastScope.InputChain;
4188 InputGlue = LastScope.InputGlue;
4189 if (!LastScope.HasChainNodesMatched)
4190 ChainNodesMatched.clear();
4191
4192 // Check to see what the offset is at the new MatcherIndex. If it is zero
4193 // we have reached the end of this scope, otherwise we have another child
4194 // in the current scope to try.
4195 unsigned NumToSkip = MatcherTable[MatcherIndex++];
4196 if (NumToSkip & 128)
4197 NumToSkip = GetVBR(Val: NumToSkip, MatcherTable, Idx&: MatcherIndex);
4198
4199 // If we have another child in this scope to match, update FailIndex and
4200 // try it.
4201 if (NumToSkip != 0) {
4202 LastScope.FailIndex = MatcherIndex+NumToSkip;
4203 break;
4204 }
4205
4206 // End of this scope, pop it and try the next child in the containing
4207 // scope.
4208 MatchScopes.pop_back();
4209 }
4210 }
4211}
4212
4213/// Return whether the node may raise an FP exception.
4214bool SelectionDAGISel::mayRaiseFPException(SDNode *N) const {
4215 // For machine opcodes, consult the MCID flag.
4216 if (N->isMachineOpcode()) {
4217 const MCInstrDesc &MCID = TII->get(Opcode: N->getMachineOpcode());
4218 return MCID.mayRaiseFPException();
4219 }
4220
4221 // For ISD opcodes, only StrictFP opcodes may raise an FP
4222 // exception.
4223 if (N->isTargetOpcode())
4224 return N->isTargetStrictFPOpcode();
4225 return N->isStrictFPOpcode();
4226}
4227
4228bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
4229 assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
4230 auto *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
4231 if (!C)
4232 return false;
4233
4234 // Detect when "or" is used to add an offset to a stack object.
4235 if (auto *FN = dyn_cast<FrameIndexSDNode>(Val: N->getOperand(Num: 0))) {
4236 MachineFrameInfo &MFI = MF->getFrameInfo();
4237 Align A = MFI.getObjectAlign(ObjectIdx: FN->getIndex());
4238 int32_t Off = C->getSExtValue();
4239 // If the alleged offset fits in the zero bits guaranteed by
4240 // the alignment, then this or is really an add.
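    // For example, a frame object aligned to 16 bytes has its low four
    // address bits known to be zero, so "or FrameIndex, 4" computes the same
    // value as "add FrameIndex, 4"; the mask check below verifies that Off
    // only uses those known-zero bits.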
4241 return (Off >= 0) && (((A.value() - 1) & Off) == unsigned(Off));
4242 }
4243 return false;
4244}
4245
4246void SelectionDAGISel::CannotYetSelect(SDNode *N) {
4247 std::string msg;
4248 raw_string_ostream Msg(msg);
4249 Msg << "Cannot select: ";
4250
4251 if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
4252 N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
4253 N->getOpcode() != ISD::INTRINSIC_VOID) {
4254 N->printrFull(O&: Msg, G: CurDAG);
4255 Msg << "\nIn function: " << MF->getName();
4256 } else {
4257 bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
4258 unsigned iid = N->getConstantOperandVal(Num: HasInputChain);
4259 if (iid < Intrinsic::num_intrinsics)
4260 Msg << "intrinsic %" << Intrinsic::getBaseName(id: (Intrinsic::ID)iid);
4261 else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
4262 Msg << "target intrinsic %" << TII->getName(IID: iid);
4263 else
4264 Msg << "unknown intrinsic #" << iid;
4265 }
4266 report_fatal_error(reason: Twine(Msg.str()));
4267}
4268
