//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//
14 | |
15 | #ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H |
16 | #define LLVM_CODEGEN_MACHINEMEMOPERAND_H |
17 | |
18 | #include "llvm/ADT/BitmaskEnum.h" |
19 | #include "llvm/ADT/PointerUnion.h" |
20 | #include "llvm/Analysis/MemoryLocation.h" |
21 | #include "llvm/CodeGen/PseudoSourceValue.h" |
22 | #include "llvm/CodeGenTypes/LowLevelType.h" |
23 | #include "llvm/IR/DerivedTypes.h" |
24 | #include "llvm/IR/LLVMContext.h" |
25 | #include "llvm/IR/Metadata.h" |
26 | #include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*> |
27 | #include "llvm/Support/AtomicOrdering.h" |
28 | #include "llvm/Support/DataTypes.h" |
29 | |
30 | namespace llvm { |
31 | |
32 | class MDNode; |
33 | class raw_ostream; |
34 | class MachineFunction; |
35 | class ModuleSlotTracker; |
36 | class TargetInstrInfo; |
37 | |
38 | /// This class contains a discriminated union of information about pointers in |
39 | /// memory operands, relating them back to LLVM IR or to virtual locations (such |
40 | /// as frame indices) that are exposed during codegen. |
41 | struct MachinePointerInfo { |
42 | /// This is the IR pointer value for the access, or it is null if unknown. |
43 | PointerUnion<const Value *, const PseudoSourceValue *> V; |
44 | |
45 | /// Offset - This is an offset from the base Value*. |
46 | int64_t Offset; |
47 | |
48 | unsigned AddrSpace = 0; |
49 | |
50 | uint8_t StackID; |
51 | |
52 | explicit MachinePointerInfo(const Value *v, int64_t offset = 0, |
53 | uint8_t ID = 0) |
54 | : V(v), Offset(offset), StackID(ID) { |
55 | AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0; |
56 | } |
57 | |
58 | explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0, |
59 | uint8_t ID = 0) |
60 | : V(v), Offset(offset), StackID(ID) { |
61 | AddrSpace = v ? v->getAddressSpace() : 0; |
62 | } |
63 | |
64 | explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0) |
65 | : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace), |
66 | StackID(0) {} |
67 | |
68 | explicit MachinePointerInfo( |
69 | PointerUnion<const Value *, const PseudoSourceValue *> v, |
70 | int64_t offset = 0, |
71 | uint8_t ID = 0) |
72 | : V(v), Offset(offset), StackID(ID) { |
73 | if (V) { |
74 | if (const auto *ValPtr = dyn_cast_if_present<const Value *>(Val&: V)) |
75 | AddrSpace = ValPtr->getType()->getPointerAddressSpace(); |
76 | else |
77 | AddrSpace = cast<const PseudoSourceValue *>(Val&: V)->getAddressSpace(); |
78 | } |
79 | } |
80 | |
81 | MachinePointerInfo getWithOffset(int64_t O) const { |
82 | if (V.isNull()) |
83 | return MachinePointerInfo(AddrSpace, Offset + O); |
84 | if (isa<const Value *>(Val: V)) |
85 | return MachinePointerInfo(cast<const Value *>(Val: V), Offset + O, StackID); |
86 | return MachinePointerInfo(cast<const PseudoSourceValue *>(Val: V), Offset + O, |
87 | StackID); |
88 | } |
89 | |
90 | /// Return true if memory region [V, V+Offset+Size) is known to be |
91 | /// dereferenceable. |
92 | bool isDereferenceable(unsigned Size, LLVMContext &C, |
93 | const DataLayout &DL) const; |
94 | |
95 | /// Return the LLVM IR address space number that this pointer points into. |
96 | unsigned getAddrSpace() const; |
97 | |
98 | /// Return a MachinePointerInfo record that refers to the constant pool. |
99 | static MachinePointerInfo getConstantPool(MachineFunction &MF); |
100 | |
101 | /// Return a MachinePointerInfo record that refers to the specified |
102 | /// FrameIndex. |
103 | static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, |
104 | int64_t Offset = 0); |
105 | |
106 | /// Return a MachinePointerInfo record that refers to a jump table entry. |
107 | static MachinePointerInfo getJumpTable(MachineFunction &MF); |
108 | |
109 | /// Return a MachinePointerInfo record that refers to a GOT entry. |
110 | static MachinePointerInfo getGOT(MachineFunction &MF); |
111 | |
112 | /// Stack pointer relative access. |
113 | static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, |
114 | uint8_t ID = 0); |
115 | |
116 | /// Stack memory without other information. |
117 | static MachinePointerInfo getUnknownStack(MachineFunction &MF); |
118 | }; |
119 | |
120 | |
121 | //===----------------------------------------------------------------------===// |
122 | /// A description of a memory reference used in the backend. |
123 | /// Instead of holding a StoreInst or LoadInst, this class holds the address |
124 | /// Value of the reference along with a byte size and offset. This allows it |
125 | /// to describe lowered loads and stores. Also, the special PseudoSourceValue |
126 | /// objects can be used to represent loads and stores to memory locations |
127 | /// that aren't explicit in the regular LLVM IR. |
128 | /// |
129 | class MachineMemOperand { |
130 | public: |
131 | /// Flags values. These may be or'd together. |
132 | enum Flags : uint16_t { |
133 | // No flags set. |
134 | MONone = 0, |
135 | /// The memory access reads data. |
136 | MOLoad = 1u << 0, |
137 | /// The memory access writes data. |
138 | MOStore = 1u << 1, |
139 | /// The memory access is volatile. |
140 | MOVolatile = 1u << 2, |
141 | /// The memory access is non-temporal. |
142 | MONonTemporal = 1u << 3, |
143 | /// The memory access is dereferenceable (i.e., doesn't trap). |
144 | MODereferenceable = 1u << 4, |
145 | /// The memory access always returns the same value (or traps). |
146 | MOInvariant = 1u << 5, |
147 | |
148 | // Reserved for use by target-specific passes. |
149 | // Targets may override getSerializableMachineMemOperandTargetFlags() to |
150 | // enable MIR serialization/parsing of these flags. If more of these flags |
151 | // are added, the MIR printing/parsing code will need to be updated as well. |
152 | MOTargetFlag1 = 1u << 6, |
153 | MOTargetFlag2 = 1u << 7, |
154 | MOTargetFlag3 = 1u << 8, |
155 | |
156 | LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3) |
157 | }; |
158 | |
159 | private: |
160 | /// Atomic information for this memory operation. |
161 | struct MachineAtomicInfo { |
162 | /// Synchronization scope ID for this memory operation. |
163 | unsigned SSID : 8; // SyncScope::ID |
164 | /// Atomic ordering requirements for this memory operation. For cmpxchg |
165 | /// atomic operations, atomic ordering requirements when store occurs. |
166 | unsigned Ordering : 4; // enum AtomicOrdering |
167 | /// For cmpxchg atomic operations, atomic ordering requirements when store |
168 | /// does not occur. |
169 | unsigned FailureOrdering : 4; // enum AtomicOrdering |
170 | }; |
171 | |
172 | MachinePointerInfo PtrInfo; |
173 | |
174 | /// Track the memory type of the access. An access size which is unknown or |
175 | /// too large to be represented by LLT should use the invalid LLT. |
176 | LLT MemoryType; |
177 | |
178 | Flags FlagVals; |
179 | Align BaseAlign; |
180 | MachineAtomicInfo AtomicInfo; |
181 | AAMDNodes AAInfo; |
182 | const MDNode *Ranges; |
183 | |
184 | public: |
185 | /// Construct a MachineMemOperand object with the specified PtrInfo, flags, |
186 | /// size, and base alignment. For atomic operations the synchronization scope |
187 | /// and atomic ordering requirements must also be specified. For cmpxchg |
188 | /// atomic operations the atomic ordering requirements when store does not |
189 | /// occur must also be specified. |
190 | MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LocationSize TS, |
191 | Align a, const AAMDNodes &AAInfo = AAMDNodes(), |
192 | const MDNode *Ranges = nullptr, |
193 | SyncScope::ID SSID = SyncScope::System, |
194 | AtomicOrdering Ordering = AtomicOrdering::NotAtomic, |
195 | AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic); |
196 | MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a, |
197 | const AAMDNodes &AAInfo = AAMDNodes(), |
198 | const MDNode *Ranges = nullptr, |
199 | SyncScope::ID SSID = SyncScope::System, |
200 | AtomicOrdering Ordering = AtomicOrdering::NotAtomic, |
201 | AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic); |
202 | |
203 | const MachinePointerInfo &getPointerInfo() const { return PtrInfo; } |
204 | |
205 | /// Return the base address of the memory access. This may either be a normal |
206 | /// LLVM IR Value, or one of the special values used in CodeGen. |
207 | /// Special values are those obtained via |
208 | /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and |
209 | /// other PseudoSourceValue member functions which return objects which stand |
210 | /// for frame/stack pointer relative references and other special references |
211 | /// which are not representable in the high-level IR. |
212 | const Value *getValue() const { |
213 | return dyn_cast_if_present<const Value *>(Val: PtrInfo.V); |
214 | } |
215 | |
216 | const PseudoSourceValue *getPseudoValue() const { |
217 | return dyn_cast_if_present<const PseudoSourceValue *>(Val: PtrInfo.V); |
218 | } |
219 | |
220 | const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); } |
221 | |
222 | /// Return the raw flags of the source value, \see Flags. |
223 | Flags getFlags() const { return FlagVals; } |
224 | |
225 | /// Bitwise OR the current flags with the given flags. |
226 | void setFlags(Flags f) { FlagVals |= f; } |
227 | |
228 | /// For normal values, this is a byte offset added to the base address. |
229 | /// For PseudoSourceValue::FPRel values, this is the FrameIndex number. |
230 | int64_t getOffset() const { return PtrInfo.Offset; } |
231 | |
232 | unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); } |
233 | |
234 | /// Return the memory type of the memory reference. This should only be relied |
235 | /// on for GlobalISel G_* operation legalization. |
236 | LLT getMemoryType() const { return MemoryType; } |
237 | |
238 | /// Return the size in bytes of the memory reference. |
239 | LocationSize getSize() const { |
240 | return MemoryType.isValid() |
241 | ? LocationSize::precise(Value: MemoryType.getSizeInBytes()) |
242 | : LocationSize::beforeOrAfterPointer(); |
243 | } |
244 | |
245 | /// Return the size in bits of the memory reference. |
246 | LocationSize getSizeInBits() const { |
247 | return MemoryType.isValid() |
248 | ? LocationSize::precise(Value: MemoryType.getSizeInBits()) |
249 | : LocationSize::beforeOrAfterPointer(); |
250 | } |
251 | |
252 | LLT getType() const { |
253 | return MemoryType; |
254 | } |
255 | |
256 | /// Return the minimum known alignment in bytes of the actual memory |
257 | /// reference. |
258 | Align getAlign() const; |
259 | |
260 | /// Return the minimum known alignment in bytes of the base address, without |
261 | /// the offset. |
262 | Align getBaseAlign() const { return BaseAlign; } |
263 | |
264 | /// Return the AA tags for the memory reference. |
265 | AAMDNodes getAAInfo() const { return AAInfo; } |
266 | |
267 | /// Return the range tag for the memory reference. |
268 | const MDNode *getRanges() const { return Ranges; } |
269 | |
270 | /// Returns the synchronization scope ID for this memory operation. |
271 | SyncScope::ID getSyncScopeID() const { |
272 | return static_cast<SyncScope::ID>(AtomicInfo.SSID); |
273 | } |
274 | |
275 | /// Return the atomic ordering requirements for this memory operation. For |
276 | /// cmpxchg atomic operations, return the atomic ordering requirements when |
277 | /// store occurs. |
278 | AtomicOrdering getSuccessOrdering() const { |
279 | return static_cast<AtomicOrdering>(AtomicInfo.Ordering); |
280 | } |
281 | |
282 | /// For cmpxchg atomic operations, return the atomic ordering requirements |
283 | /// when store does not occur. |
284 | AtomicOrdering getFailureOrdering() const { |
285 | return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering); |
286 | } |
287 | |
288 | /// Return a single atomic ordering that is at least as strong as both the |
289 | /// success and failure orderings for an atomic operation. (For operations |
290 | /// other than cmpxchg, this is equivalent to getSuccessOrdering().) |
291 | AtomicOrdering getMergedOrdering() const { |
292 | return getMergedAtomicOrdering(AO: getSuccessOrdering(), Other: getFailureOrdering()); |
293 | } |
294 | |
295 | bool isLoad() const { return FlagVals & MOLoad; } |
296 | bool isStore() const { return FlagVals & MOStore; } |
297 | bool isVolatile() const { return FlagVals & MOVolatile; } |
298 | bool isNonTemporal() const { return FlagVals & MONonTemporal; } |
299 | bool isDereferenceable() const { return FlagVals & MODereferenceable; } |
300 | bool isInvariant() const { return FlagVals & MOInvariant; } |
301 | |
302 | /// Returns true if this operation has an atomic ordering requirement of |
303 | /// unordered or higher, false otherwise. |
304 | bool isAtomic() const { |
305 | return getSuccessOrdering() != AtomicOrdering::NotAtomic; |
306 | } |
307 | |
308 | /// Returns true if this memory operation doesn't have any ordering |
309 | /// constraints other than normal aliasing. Volatile and (ordered) atomic |
310 | /// memory operations can't be reordered. |
311 | bool isUnordered() const { |
312 | return (getSuccessOrdering() == AtomicOrdering::NotAtomic || |
313 | getSuccessOrdering() == AtomicOrdering::Unordered) && |
314 | !isVolatile(); |
315 | } |
316 | |
317 | /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a |
318 | /// greater alignment. This must only be used when the new alignment applies |
319 | /// to all users of this MachineMemOperand. |
320 | void refineAlignment(const MachineMemOperand *MMO); |
321 | |
322 | /// Change the SourceValue for this MachineMemOperand. This should only be |
323 | /// used when an object is being relocated and all references to it are being |
324 | /// updated. |
325 | void setValue(const Value *NewSV) { PtrInfo.V = NewSV; } |
326 | void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; } |
327 | void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; } |
328 | |
329 | /// Reset the tracked memory type. |
330 | void setType(LLT NewTy) { |
331 | MemoryType = NewTy; |
332 | } |
333 | |
334 | /// Support for operator<<. |
335 | /// @{ |
336 | void print(raw_ostream &OS, ModuleSlotTracker &MST, |
337 | SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context, |
338 | const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const; |
339 | /// @} |
340 | |
341 | friend bool operator==(const MachineMemOperand &LHS, |
342 | const MachineMemOperand &RHS) { |
343 | return LHS.getValue() == RHS.getValue() && |
344 | LHS.getPseudoValue() == RHS.getPseudoValue() && |
345 | LHS.getSize() == RHS.getSize() && |
346 | LHS.getOffset() == RHS.getOffset() && |
347 | LHS.getFlags() == RHS.getFlags() && |
348 | LHS.getAAInfo() == RHS.getAAInfo() && |
349 | LHS.getRanges() == RHS.getRanges() && |
350 | LHS.getAlign() == RHS.getAlign() && |
351 | LHS.getAddrSpace() == RHS.getAddrSpace(); |
352 | } |
353 | |
354 | friend bool operator!=(const MachineMemOperand &LHS, |
355 | const MachineMemOperand &RHS) { |
356 | return !(LHS == RHS); |
357 | } |
358 | }; |
359 | |
360 | } // End llvm namespace |
361 | |
362 | #endif |
363 | |