//===- HexagonInstrInfo.h - Hexagon Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Hexagon implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONINSTRINFO_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONINSTRINFO_H

#include "MCTargetDesc/HexagonBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include <cstdint>
#include <vector>

#define GET_INSTRINFO_HEADER
#include "HexagonGenInstrInfo.inc"

namespace llvm {

class HexagonSubtarget;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class TargetRegisterInfo;

class HexagonInstrInfo : public HexagonGenInstrInfo {
  const HexagonSubtarget &Subtarget;

  enum BundleAttribute {
    memShufDisabledMask = 0x4
  };

  virtual void anchor();

public:
  explicit HexagonInstrInfo(HexagonSubtarget &ST);

  /// TargetInstrInfo overrides.

  /// If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  Register isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;

  /// If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stack slot it is stored
  /// to. If not, return 0. This predicate must return 0 if the instruction
  /// has any side effects other than storing to the stack slot.
  Register isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Check if the instruction or the bundle of instructions has
  /// load from stack slots. Return the frameindex and machine memory operand
  /// if true.
  bool hasLoadFromStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const override;

  /// Check if the instruction or the bundle of instructions has
  /// store to stack slots. Return the frameindex and machine memory operand
  /// if true.
  bool hasStoreToStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const override;

  /// Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target). Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its
  ///    succ) just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to
  ///    be the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and a
  ///    list of operands that evaluate the condition. These operands can be
  ///    passed to other TargetInstrInfo methods to create new branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition. These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that removeBranch and insertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;
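
  // Illustrative usage sketch (editorial addition, not part of the interface):
  // a caller distinguishes the four documented cases from the TBB/FBB/Cond
  // triple; `TII` is assumed to point at this HexagonInstrInfo instance.
  //
  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false)) {
  //     if (!TBB)
  //       ; // Case 1: fall-through only, nothing to rewrite.
  //     else if (Cond.empty())
  //       ; // Case 2: single unconditional branch to TBB.
  //     else if (!FBB)
  //       ; // Case 3: conditional branch to TBB, fall-through otherwise.
  //     else
  //       ; // Case 4: conditional branch to TBB, unconditional branch to FBB.
  //   }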

  /// Remove the branching code at the end of the specific MBB.
  /// This is only invoked in cases where analyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;

  /// Insert branch code into the end of the specified MachineBasicBlock.
  /// The operands to this method are the same as those
  /// returned by analyzeBranch. This is only invoked in cases where
  /// analyzeBranch returns success. It returns the number of instructions
  /// inserted.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where analyzeBranch doesn't apply because there was no original
  /// branch to analyze. At least this much must be implemented, else tail
  /// merging needs to be disabled.
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;

  /// Analyze loop L, which must be a single-basic-block loop, and if its
  /// conditions can be understood well enough, produce a PipelinerLoopInfo
  /// object.
  std::unique_ptr<PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;

  /// Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                           unsigned ExtraPredCycles,
                           BranchProbability Probability) const override;

  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from the true and false paths
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
                           unsigned NumTCycles, unsigned ExtraTCycles,
                           MachineBasicBlock &FMBB,
                           unsigned NumFCycles, unsigned ExtraFCycles,
                           BranchProbability Probability) const override;

  /// Return true if it's profitable for if-converter to duplicate instructions
  /// of specified accumulated instruction latencies in the specified MBB to
  /// enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                 BranchProbability Probability) const override;

  /// Emit instructions to copy a pair of physical registers.
  ///
  /// This function should support copies within any legal register class as
  /// well as any cross-class copies created during instruction selection.
  ///
  /// The source and destination registers may overlap, which may require a
  /// careful implementation when multiple copy instructions are required for
  /// large registers. See for example the ARM target.
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  /// Store the specified register of the given register class to the specified
  /// stack frame index. The store instruction is to be added to the given
  /// machine basic block before the specified machine instruction. If isKill
  /// is true, the register operand is the last use and must be marked kill.
  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  /// Load the specified register of the given register class from the
  /// specified stack frame index. The load instruction is to be added to the
  /// given machine basic block before the specified machine instruction.
  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  /// This function is called for all pseudo instructions
  /// that remain after register allocation. Many pseudo instructions are
  /// created to help register allocation. This is the place to convert them
  /// into real instructions. The target can edit MI in place, or it can insert
  /// new instructions and erase MI. The function should return true if
  /// anything was changed.
  bool expandPostRAPseudo(MachineInstr &MI) const override;

  /// Get the base register and byte offset of a load/store instr.
  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &LdSt,
      SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
      bool &OffsetIsScalable, LocationSize &Width,
      const TargetRegisterInfo *TRI) const override;
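
  // Illustrative sketch (editorial addition): a scheduler-style client might
  // use this hook to obtain the base operands, offset and width of a memory
  // access before deciding whether two accesses can be clustered; `LdSt` and
  // `TRI` are assumed to be in scope.
  //
  //   SmallVector<const MachineOperand *, 4> BaseOps;
  //   int64_t Offset;
  //   bool OffsetIsScalable;
  //   LocationSize Width = LocationSize::precise(0);
  //   if (TII->getMemOperandsWithOffsetWidth(LdSt, BaseOps, Offset,
  //                                          OffsetIsScalable, Width, TRI))
  //     ; // BaseOps/Offset/Width now describe the access.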

  /// Reverses the branch condition of the specified condition list,
  /// returning false on success and true if it cannot be reversed.
  bool reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
      const override;

  /// Insert a noop into the instruction stream at the specified point.
  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  /// Returns true if the instruction is already predicated.
  bool isPredicated(const MachineInstr &MI) const override;

  /// Return true for post-incremented instructions.
  bool isPostIncrement(const MachineInstr &MI) const override;

  /// Convert the instruction into a predicated instruction.
  /// It returns true if the operation was successful.
  bool PredicateInstruction(MachineInstr &MI,
                            ArrayRef<MachineOperand> Cond) const override;
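
  // Illustrative sketch (editorial addition): if-conversion-style usage, where
  // Cond comes from analyzeBranch and is applied only to instructions the
  // target reports as predicable; `NumPredicated` is a hypothetical counter.
  //
  //   if (TII->isPredicable(MI) && !TII->isPredicated(MI) &&
  //       TII->PredicateInstruction(MI, Cond))
  //     ++NumPredicated;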

  /// Returns true if the first specified predicate
  /// subsumes the second, e.g. GE subsumes GT.
  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                         ArrayRef<MachineOperand> Pred2) const override;

  /// If the specified instruction defines any predicate
  /// or condition code register(s) used for predication, returns true as well
  /// as the definition predicate(s) by reference.
  bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
                         bool SkipDead) const override;

  /// Return true if the specified instruction can be predicated.
  /// By default, this returns true for every instruction with a
  /// PredicateOperand.
  bool isPredicable(const MachineInstr &MI) const override;

  /// Test if the given instruction should be considered a scheduling boundary.
  /// This primarily includes labels and terminators.
  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// Measure the specified inline asm to determine an approximation of its
  /// length.
  unsigned getInlineAsmLength(
      const char *Str,
      const MCAsmInfo &MAI,
      const TargetSubtargetInfo *STI = nullptr) const override;

  /// Allocate and return a hazard recognizer to use for this target when
  /// scheduling the machine instructions after register allocation.
  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                     const ScheduleDAG *DAG) const override;

  /// For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if having two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &Mask,
                      int64_t &Value) const override;

  /// Compute the instruction latency of a given instruction.
  /// If the instruction has higher cost when predicated, it's returned via
  /// PredCost.
  unsigned getInstrLatency(const InstrItineraryData *ItinData,
                           const MachineInstr &MI,
                           unsigned *PredCost = nullptr) const override;

  /// Create machine specific model for scheduling.
  DFAPacketizer *
  CreateTargetScheduleState(const TargetSubtargetInfo &STI) const override;

  // Sometimes, it is possible for the target
  // to tell, even without aliasing information, that two MIs access different
  // memory addresses. This function returns true if two MIs access different
  // memory addresses and false otherwise.
  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  /// For instructions with a base and offset, return the position of the
  /// base register and offset operands.
  bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos,
                                unsigned &OffsetPos) const override;

  /// If the instruction is an increment of a constant value, return the
  /// amount.
  bool getIncrementValue(const MachineInstr &MI, int &Value) const override;

  /// getOperandLatency - Compute and return the use operand latency of a given
  /// pair of def and use.
  /// In most cases, the static scheduling itinerary is enough to determine the
  /// operand latency. But it may not be possible for instructions with a
  /// variable number of defs / uses.
  ///
  /// This is a raw interface to the itinerary that may be directly overridden
  /// by a target. Use computeOperandLatency to get the best estimate of
  /// latency.
  std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &DefMI,
                                            unsigned DefIdx,
                                            const MachineInstr &UseMI,
                                            unsigned UseIdx) const override;

  /// Decompose the machine operand's target flags into two values - the direct
  /// target flag value and any of bit flags that are applied.
  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  /// Return an array that contains the direct target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  /// Return an array that contains the bitmask target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;

  bool isTailCall(const MachineInstr &MI) const override;
  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  // Return true if the instruction should be sunk by MachineSink.
  // MachineSink determines on its own whether the instruction is safe to sink;
  // this gives the target a hook to override the default behavior with regards
  // to which instructions should be sunk.
  bool shouldSink(const MachineInstr &MI) const override;

  /// HexagonInstrInfo specifics.

  Register createVR(MachineFunction *MF, MVT VT) const;
  MachineInstr *findLoopInstr(MachineBasicBlock *BB, unsigned EndLoopOp,
                              MachineBasicBlock *TargetBB,
                              SmallPtrSet<MachineBasicBlock *, 8> &Visited) const;

  bool isAbsoluteSet(const MachineInstr &MI) const;
  bool isAccumulator(const MachineInstr &MI) const;
  bool isAddrModeWithOffset(const MachineInstr &MI) const;
  bool isBaseImmOffset(const MachineInstr &MI) const;
  bool isComplex(const MachineInstr &MI) const;
  bool isCompoundBranchInstr(const MachineInstr &MI) const;
  bool isConstExtended(const MachineInstr &MI) const;
  bool isDeallocRet(const MachineInstr &MI) const;
  bool isDependent(const MachineInstr &ProdMI,
                   const MachineInstr &ConsMI) const;
  bool isDotCurInst(const MachineInstr &MI) const;
  bool isDotNewInst(const MachineInstr &MI) const;
  bool isDuplexPair(const MachineInstr &MIa, const MachineInstr &MIb) const;
  bool isEndLoopN(unsigned Opcode) const;
  bool isExpr(unsigned OpType) const;
  bool isExtendable(const MachineInstr &MI) const;
  bool isExtended(const MachineInstr &MI) const;
  bool isFloat(const MachineInstr &MI) const;
  bool isHVXMemWithAIndirect(const MachineInstr &I,
                             const MachineInstr &J) const;
  bool isIndirectCall(const MachineInstr &MI) const;
  bool isIndirectL4Return(const MachineInstr &MI) const;
  bool isJumpR(const MachineInstr &MI) const;
  bool isJumpWithinBranchRange(const MachineInstr &MI, unsigned offset) const;
  bool isLateSourceInstr(const MachineInstr &MI) const;
  bool isLoopN(const MachineInstr &MI) const;
  bool isMemOp(const MachineInstr &MI) const;
  bool isNewValue(const MachineInstr &MI) const;
  bool isNewValue(unsigned Opcode) const;
  bool isNewValueInst(const MachineInstr &MI) const;
  bool isNewValueJump(const MachineInstr &MI) const;
  bool isNewValueJump(unsigned Opcode) const;
  bool isNewValueStore(const MachineInstr &MI) const;
  bool isNewValueStore(unsigned Opcode) const;
  bool isOperandExtended(const MachineInstr &MI, unsigned OperandNum) const;
  bool isPredicatedNew(const MachineInstr &MI) const;
  bool isPredicatedNew(unsigned Opcode) const;
  bool isPredicatedTrue(const MachineInstr &MI) const;
  bool isPredicatedTrue(unsigned Opcode) const;
  bool isPredicated(unsigned Opcode) const;
  bool isPredicateLate(unsigned Opcode) const;
  bool isPredictedTaken(unsigned Opcode) const;
  bool isPureSlot0(const MachineInstr &MI) const;
  bool isRestrictNoSlot1Store(const MachineInstr &MI) const;
  bool isSaveCalleeSavedRegsCall(const MachineInstr &MI) const;
  bool isSignExtendingLoad(const MachineInstr &MI) const;
  bool isSolo(const MachineInstr &MI) const;
  bool isSpillPredRegOp(const MachineInstr &MI) const;
  bool isTC1(const MachineInstr &MI) const;
  bool isTC2(const MachineInstr &MI) const;
  bool isTC2Early(const MachineInstr &MI) const;
  bool isTC4x(const MachineInstr &MI) const;
  bool isToBeScheduledASAP(const MachineInstr &MI1,
                           const MachineInstr &MI2) const;
  bool isHVXVec(const MachineInstr &MI) const;
  bool isValidAutoIncImm(const EVT VT, const int Offset) const;
  bool isValidOffset(unsigned Opcode, int Offset,
                     const TargetRegisterInfo *TRI, bool Extend = true) const;
  bool isVecAcc(const MachineInstr &MI) const;
  bool isVecALU(const MachineInstr &MI) const;
  bool isVecUsableNextPacket(const MachineInstr &ProdMI,
                             const MachineInstr &ConsMI) const;
  bool isZeroExtendingLoad(const MachineInstr &MI) const;

  bool addLatencyToSchedule(const MachineInstr &MI1,
                            const MachineInstr &MI2) const;
  bool canExecuteInBundle(const MachineInstr &First,
                          const MachineInstr &Second) const;
  bool doesNotReturn(const MachineInstr &CallMI) const;
  bool hasEHLabel(const MachineBasicBlock *B) const;
  bool hasNonExtEquivalent(const MachineInstr &MI) const;
  bool hasPseudoInstrPair(const MachineInstr &MI) const;
  bool hasUncondBranch(const MachineBasicBlock *B) const;
  bool mayBeCurLoad(const MachineInstr &MI) const;
  bool mayBeNewStore(const MachineInstr &MI) const;
  bool producesStall(const MachineInstr &ProdMI,
                     const MachineInstr &ConsMI) const;
  bool producesStall(const MachineInstr &MI,
                     MachineBasicBlock::const_instr_iterator MII) const;
  bool predCanBeUsedAsDotNew(const MachineInstr &MI, Register PredReg) const;
  bool PredOpcodeHasJMP_c(unsigned Opcode) const;
  bool predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const;

  unsigned getAddrMode(const MachineInstr &MI) const;
  MachineOperand *getBaseAndOffset(const MachineInstr &MI, int64_t &Offset,
                                   LocationSize &AccessSize) const;
  SmallVector<MachineInstr *, 2>
  getBranchingInstrs(MachineBasicBlock &MBB) const;
  unsigned getCExtOpNum(const MachineInstr &MI) const;
  HexagonII::CompoundGroup
  getCompoundCandidateGroup(const MachineInstr &MI) const;
  unsigned getCompoundOpcode(const MachineInstr &GA,
                             const MachineInstr &GB) const;
  int getDuplexOpcode(const MachineInstr &MI, bool ForBigCore = true) const;
  int getCondOpcode(int Opc, bool sense) const;
  int getDotCurOp(const MachineInstr &MI) const;
  int getNonDotCurOp(const MachineInstr &MI) const;
  int getDotNewOp(const MachineInstr &MI) const;
  int getDotNewPredJumpOp(const MachineInstr &MI,
                          const MachineBranchProbabilityInfo *MBPI) const;
  int getDotNewPredOp(const MachineInstr &MI,
                      const MachineBranchProbabilityInfo *MBPI) const;
  int getDotOldOp(const MachineInstr &MI) const;
  HexagonII::SubInstructionGroup
  getDuplexCandidateGroup(const MachineInstr &MI) const;
  short getEquivalentHWInstr(const MachineInstr &MI) const;
  /// Compute the instruction latency of a given instruction using Timing
  /// Class information, if available.
  unsigned getInstrTimingClassLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI) const;
  bool getInvertedPredSense(SmallVectorImpl<MachineOperand> &Cond) const;
  unsigned getInvertedPredicatedOpcode(const int Opc) const;
  int getMaxValue(const MachineInstr &MI) const;
  unsigned getMemAccessSize(const MachineInstr &MI) const;
  int getMinValue(const MachineInstr &MI) const;
  short getNonExtOpcode(const MachineInstr &MI) const;
  bool getPredReg(ArrayRef<MachineOperand> Cond, Register &PredReg,
                  unsigned &PredRegPos, unsigned &PredRegFlags) const;
  short getPseudoInstrPair(const MachineInstr &MI) const;
  short getRegForm(const MachineInstr &MI) const;
  unsigned getSize(const MachineInstr &MI) const;
  uint64_t getType(const MachineInstr &MI) const;
  InstrStage::FuncUnits getUnits(const MachineInstr &MI) const;

  MachineBasicBlock::instr_iterator expandVGatherPseudo(MachineInstr &MI) const;

  /// Count the non-debug instructions in a basic block, or in the bundle
  /// headed by BundleHead.
  unsigned nonDbgBBSize(const MachineBasicBlock *BB) const;
  unsigned nonDbgBundleSize(MachineBasicBlock::const_iterator BundleHead) const;

  void immediateExtend(MachineInstr &MI) const;
  bool invertAndChangeJumpTarget(MachineInstr &MI,
                                 MachineBasicBlock *NewTarget) const;
  void genAllInsnTimingClasses(MachineFunction &MF) const;
  bool reversePredSense(MachineInstr &MI) const;
  unsigned reversePrediction(unsigned Opcode) const;
  bool validateBranchCond(const ArrayRef<MachineOperand> &Cond) const;

  void setBundleNoShuf(MachineBasicBlock::instr_iterator MIB) const;
  bool getBundleNoShuf(const MachineInstr &MIB) const;
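
  // Illustrative sketch (editorial addition): the no-shuffle marker is kept as
  // a per-bundle flag (see BundleAttribute above), so a packetizer-style
  // client would set it on the BUNDLE header instruction and query it later.
  // `BundleIt` below is a hypothetical iterator pointing at a BUNDLE header.
  //
  //   TII->setBundleNoShuf(BundleIt);
  //   if (TII->getBundleNoShuf(*BundleIt))
  //     ; // memory operations in this packet must not be reordered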

  // When TinyCore with Duplexes is enabled, these functions are used to
  // translate tiny-instructions to big-instructions and vice versa to get the
  // slot consumption.
  void changeDuplexOpcode(MachineBasicBlock::instr_iterator MII,
                          bool ToBigInstrs) const;
  void translateInstrsForDup(MachineFunction &MF,
                             bool ToBigInstrs = true) const;
  void translateInstrsForDup(MachineBasicBlock::instr_iterator MII,
                             bool ToBigInstrs) const;

  // Addressing mode relations.
  short changeAddrMode_abs_io(short Opc) const;
  short changeAddrMode_io_abs(short Opc) const;
  short changeAddrMode_io_pi(short Opc) const;
  short changeAddrMode_io_rr(short Opc) const;
  short changeAddrMode_pi_io(short Opc) const;
  short changeAddrMode_rr_io(short Opc) const;
  short changeAddrMode_rr_ur(short Opc) const;
  short changeAddrMode_ur_rr(short Opc) const;

  short changeAddrMode_abs_io(const MachineInstr &MI) const {
    return changeAddrMode_abs_io(MI.getOpcode());
  }
  short changeAddrMode_io_abs(const MachineInstr &MI) const {
    return changeAddrMode_io_abs(MI.getOpcode());
  }
  short changeAddrMode_io_rr(const MachineInstr &MI) const {
    return changeAddrMode_io_rr(MI.getOpcode());
  }
  short changeAddrMode_rr_io(const MachineInstr &MI) const {
    return changeAddrMode_rr_io(MI.getOpcode());
  }
  short changeAddrMode_rr_ur(const MachineInstr &MI) const {
    return changeAddrMode_rr_ur(MI.getOpcode());
  }
  short changeAddrMode_ur_rr(const MachineInstr &MI) const {
    return changeAddrMode_ur_rr(MI.getOpcode());
  }
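
  // Illustrative sketch (editorial addition): each relation maps an opcode
  // from one addressing form to another; callers conventionally treat a
  // negative result as "no counterpart exists" before rewriting, though that
  // convention is an assumption here, not documented by this header.
  //
  //   short NewOpc = TII->changeAddrMode_io_pi(MI.getOpcode());
  //   if (NewOpc >= 0)
  //     ; // a post-increment form exists; MI may be rebuilt with NewOpc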

  MCInst getNop() const override;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONINSTRINFO_H