//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class MDNode;
class raw_ostream;
class MachineFunction;
class ModuleSlotTracker;
class TargetInstrInfo;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  unsigned AddrSpace = 0;

  uint8_t StackID;

  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }
  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
      : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace),
        StackID(0) {}

  explicit MachinePointerInfo(
      PointerUnion<const Value *, const PseudoSourceValue *> v,
      int64_t offset = 0,
      uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = dyn_cast_if_present<const Value *>(V))
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = cast<const PseudoSourceValue *>(V)->getAddressSpace();
    }
  }

  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace, Offset + O);
    if (isa<const Value *>(V))
      return MachinePointerInfo(cast<const Value *>(V), Offset + O, StackID);
    return MachinePointerInfo(cast<const PseudoSourceValue *>(V), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};
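
// Illustrative usage sketch (not part of the original header): building a
// MachinePointerInfo for a fixed stack slot and deriving the info for the
// upper half of a split access. MF and FI are assumed to be a MachineFunction
// and a frame index available in the caller.
//
//   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
//   MachinePointerInfo HiPart = PtrInfo.getWithOffset(8);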


//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags. If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
  };
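
  // Illustrative note (not part of the original header): Flags is a bitmask
  // enum, so the flags describing one access are combined with bitwise OR.
  // For example, a volatile load could be described as:
  //
  //   MachineMemOperand::Flags F =
  //       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;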

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, atomic ordering requirements when store occurs.
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, atomic ordering requirements when store
    /// does not occur.
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  MachinePointerInfo PtrInfo;

  /// Track the memory type of the access. An access size which is unknown or
  /// too large to be represented by LLT should use the invalid LLT.
  LLT MemoryType;

  Flags FlagVals;
  Align BaseAlign;
  MachineAtomicInfo AtomicInfo;
  AAMDNodes AAInfo;
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the atomic ordering requirements when store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
                    Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a,
                    const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
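
  // Illustrative sketch (not part of the original header): backend code
  // normally obtains MachineMemOperands through
  // MachineFunction::getMachineMemOperand rather than constructing them
  // directly, so that the operand lives in the function's allocator. MF and
  // PtrInfo are assumed to be in scope; the overload shown is one of several.
  //
  //   MachineMemOperand *MMO = MF.getMachineMemOperand(
  //       PtrInfo, MachineMemOperand::MOLoad, LLT::scalar(64), Align(8));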

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const {
    return dyn_cast_if_present<const Value *>(PtrInfo.V);
  }

  const PseudoSourceValue *getPseudoValue() const {
    return dyn_cast_if_present<const PseudoSourceValue *>(PtrInfo.V);
  }

  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the memory type of the memory reference. This should only be
  /// relied on for GlobalISel G_* operation legalization.
  LLT getMemoryType() const { return MemoryType; }

  /// Return the size in bytes of the memory reference.
  uint64_t getSize() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0);
  }

  /// Return the size in bits of the memory reference.
  uint64_t getSizeInBits() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0);
  }

  LLT getType() const {
    return MemoryType;
  }
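
  // Illustrative sketch (not part of the original header): querying the
  // tracked type and size of an operand. MMO is assumed to be a
  // MachineMemOperand * obtained elsewhere; an invalid LLT means the size is
  // unknown or not representable.
  //
  //   LLT MemTy = MMO->getMemoryType();
  //   bool KnownSmall = MemTy.isValid() && MMO->getSize() <= 8;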

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  Align getAlign() const;

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  Align getBaseAlign() const { return BaseAlign; }

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation. (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const {
    return getMergedAtomicOrdering(getSuccessOrdering(), getFailureOrdering());
  }

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const {
    return getSuccessOrdering() != AtomicOrdering::NotAtomic;
  }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const {
    return (getSuccessOrdering() == AtomicOrdering::NotAtomic ||
            getSuccessOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }
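
  // Illustrative sketch (not part of the original header): passes commonly
  // gate load/store reordering on these predicates (an aliasing check is
  // still needed on top of this). MMOA and MMOB are assumed to be the memory
  // operands of two candidate instructions.
  //
  //   bool OrderingAllowsSwap = MMOA->isUnordered() && MMOB->isUnordered();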

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has
  /// a greater alignment. This must only be used when the new alignment
  /// applies to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Reset the tracked memory type.
  void setType(LLT NewTy) {
    MemoryType = NewTy;
  }
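
  // Illustrative sketch (not part of the original header): retyping an
  // operand in place after narrowing an access, valid only if no other
  // instruction shares this MachineMemOperand. MMO is assumed to describe an
  // 8-byte access being shrunk to its upper 4 bytes.
  //
  //   MMO->setType(LLT::scalar(32));
  //   MMO->setOffset(MMO->getOffset() + 4);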

  /// Support for operator<<.
  /// @{
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
  /// @}

  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlign() == RHS.getAlign() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};

} // End llvm namespace

#endif