//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class DataLayout;
class FoldingSetNodeID;
class LLVMContext;
class MachineFrameInfo;
class MachineFunction;
class MDNode;
class ModuleSlotTracker;
class raw_ostream;
class TargetInstrInfo;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  /// If this is null, then the access is to a pointer in the default address
  /// space.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  /// The ID of the stack this memory reference lives on, for targets that
  /// use multiple stacks. Zero is the default stack.
  uint8_t StackID;

  /// The address space of the pointed-to memory.
  unsigned AddrSpace = 0;

  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }

  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  explicit MachinePointerInfo(unsigned AddressSpace = 0)
      : V((const Value *)nullptr), Offset(0), StackID(0),
        AddrSpace(AddressSpace) {}

  explicit MachinePointerInfo(
      PointerUnion<const Value *, const PseudoSourceValue *> v,
      int64_t offset = 0, uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = V.dyn_cast<const Value *>())
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = V.get<const PseudoSourceValue *>()->getAddressSpace();
    }
  }

  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace);
    if (V.is<const Value *>())
      return MachinePointerInfo(V.get<const Value *>(), Offset + O, StackID);
    return MachinePointerInfo(V.get<const PseudoSourceValue *>(), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};
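
// Example (illustrative sketch, not part of the interface): a spill-slot
// reference is typically described with the static helpers above, e.g.
//
//   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
//   MachinePointerInfo Hi = PtrInfo.getWithOffset(4); // same slot, +4 bytes
//
// where MF and FI stand for a MachineFunction and frame index assumed to be
// available in the calling code.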

//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags. If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
  };

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, atomic ordering requirements when store occurs.
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, atomic ordering requirements when store
    /// does not occur.
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  MachinePointerInfo PtrInfo;
  uint64_t Size;
  Flags FlagVals;
  uint16_t BaseAlignLog2; // log_2(base_alignment) + 1
  MachineAtomicInfo AtomicInfo;
  AAMDNodes AAInfo;
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the atomic ordering requirements when store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
                    uint64_t a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
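
  // Example (illustrative sketch): a 4-byte, 4-byte-aligned load from a spill
  // slot could be described as
  //
  //   MachineMemOperand MMO(MachinePointerInfo::getFixedStack(MF, FI),
  //                         MachineMemOperand::MOLoad, /*size=*/4,
  //                         /*base alignment=*/4);
  //
  // MF and FI above are assumed to be provided by the caller. In practice,
  // MachineMemOperands are usually allocated through the owning
  // MachineFunction (e.g. MachineFunction::getMachineMemOperand) rather than
  // constructed directly. The Flags argument is a bitmask, so
  // MOLoad | MODereferenceable | MOInvariant describes a load from memory
  // that is known to be dereferenceable and never modified.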

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value *>(); }

  const PseudoSourceValue *getPseudoValue() const {
    return PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  }

  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the size in bytes of the memory reference.
  uint64_t getSize() const { return Size; }

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  uint64_t getAlignment() const;

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  uint64_t getBaseAlignment() const { return (1ull << BaseAlignLog2) >> 1; }
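
  // Note (informal): getBaseAlignment() describes the alignment of the base
  // pointer only, while getAlignment() also accounts for the offset. For
  // example, a reference with base alignment 8 and offset 4 has a known
  // alignment of only 4, since base+4 need not be 8-byte aligned.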

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and atomic memory
  /// operations can't be reordered.
  ///
  /// Currently, we don't model the difference between volatile and atomic
  /// operations. They should retain their ordering relative to all memory
  /// operations.
  bool isUnordered() const { return !isVolatile(); }

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has
  /// a greater alignment. This must only be used when the new alignment
  /// applies to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Profile - Gather unique data for the object.
  void Profile(FoldingSetNodeID &ID) const;

  /// Support for operator<<.
  /// @{
  void print(raw_ostream &OS) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
  /// @}

  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlignment() == RHS.getAlignment() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
  MRO.print(OS);
  return OS;
}

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEMEMOPERAND_H