/external/llvm/lib/Target/Lanai/ |
D | LanaiSchedule.td |
      56  def LdSt : ProcResource<1> { let BufferSize = 0; }
      65  def : WriteRes<WriteLD, [LdSt]> { let Latency = 2; }
      66  def : WriteRes<WriteST, [LdSt]> { let Latency = 2; }
      67  def : WriteRes<WriteLDSW, [LdSt]> { let Latency = 2; }
      68  def : WriteRes<WriteSTSW, [LdSt]> { let Latency = 4; }
|
D | LanaiInstrInfo.cpp |
      751  MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,  in getMemOpBaseRegImmOfsWidth() argument
      755  if (LdSt.getNumOperands() != 4)  in getMemOpBaseRegImmOfsWidth()
      757  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm() ||  in getMemOpBaseRegImmOfsWidth()
      758  !(LdSt.getOperand(3).isImm() && LdSt.getOperand(3).getImm() == LPAC::ADD))  in getMemOpBaseRegImmOfsWidth()
      761  switch (LdSt.getOpcode()) {  in getMemOpBaseRegImmOfsWidth()
      782  BaseReg = LdSt.getOperand(1).getReg();  in getMemOpBaseRegImmOfsWidth()
      783  Offset = LdSt.getOperand(2).getImm();  in getMemOpBaseRegImmOfsWidth()
      788  MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,  in getMemOpBaseRegImmOfs() argument
      790  switch (LdSt.getOpcode()) {  in getMemOpBaseRegImmOfs()
      803  return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);  in getMemOpBaseRegImmOfs()
|
D | LanaiInstrInfo.h |
      70  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
      74  bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
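The Lanai fragments above show the width-aware hook rejecting any memory instruction whose operands are not (destination, base register, immediate offset, ALU op equal to LPAC::ADD) before it copies out BaseReg and Offset. A minimal, self-contained sketch of that shape check follows; the Operand/MemInstr types and names are stand-ins for illustration only, not the real llvm::MachineInstr and llvm::MachineOperand classes.

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <vector>

    // Stand-in operand: either a register number or an immediate value.
    struct Operand {
      enum class Kind { Reg, Imm } K;
      int64_t Value;
    };

    // Stand-in memory instruction: [dst, base, offset, aluOp], mirroring the
    // four-operand load/store form the Lanai hook expects.
    struct MemInstr {
      std::vector<Operand> Ops;
    };

    constexpr int64_t AluAdd = 0; // placeholder for LPAC::ADD

    struct BaseAndOffset {
      unsigned BaseReg;
      int64_t Offset;
    };

    // Returns (base register, byte offset) only when the operand shape matches
    // what the excerpt checks: exactly four operands, operand 1 a register,
    // operand 2 an immediate, operand 3 an immediate equal to the ADD ALU op.
    std::optional<BaseAndOffset> getBaseRegImmOfs(const MemInstr &LdSt) {
      if (LdSt.Ops.size() != 4)
        return std::nullopt;
      if (LdSt.Ops[1].K != Operand::Kind::Reg ||
          LdSt.Ops[2].K != Operand::Kind::Imm ||
          !(LdSt.Ops[3].K == Operand::Kind::Imm && LdSt.Ops[3].Value == AluAdd))
        return std::nullopt;
      return BaseAndOffset{static_cast<unsigned>(LdSt.Ops[1].Value),
                           LdSt.Ops[2].Value};
    }

    int main() {
      MemInstr Load{{{Operand::Kind::Reg, 3},    // dst
                     {Operand::Kind::Reg, 7},    // base
                     {Operand::Kind::Imm, 16},   // offset
                     {Operand::Kind::Imm, AluAdd}}};
      if (auto BO = getBaseRegImmOfs(Load))
        std::cout << "base r" << BO->BaseReg << " + " << BO->Offset << "\n";
    }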
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
      205  bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,  in getMemOpBaseRegImmOfs() argument
      208  unsigned Opc = LdSt.getOpcode();  in getMemOpBaseRegImmOfs()
      210  if (isDS(LdSt)) {  in getMemOpBaseRegImmOfs()
      212  getNamedOperand(LdSt, AMDGPU::OpName::offset);  in getMemOpBaseRegImmOfs()
      216  getNamedOperand(LdSt, AMDGPU::OpName::addr);  in getMemOpBaseRegImmOfs()
      227  getNamedOperand(LdSt, AMDGPU::OpName::offset0);  in getMemOpBaseRegImmOfs()
      229  getNamedOperand(LdSt, AMDGPU::OpName::offset1);  in getMemOpBaseRegImmOfs()
      239  if (LdSt.mayLoad())  in getMemOpBaseRegImmOfs()
      240  EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;  in getMemOpBaseRegImmOfs()
      242  assert(LdSt.mayStore());  in getMemOpBaseRegImmOfs()
      [all …]
|
D | SIInstrInfo.h |
      113  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
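The truncated AMDGPU excerpt handles DS (local data share) memory ops: when a single named offset operand exists, base and offset come straight from the addr/offset operands; when the instruction carries two element-scaled offsets (offset0/offset1), the fragments around EltSize suggest the pair is only reported as one access, with the element-unit offset converted to bytes using the size of the loaded or stored register class. A small stand-alone sketch of that element-to-byte conversion, under the assumption (inferred from the truncated excerpt, not verbatim SIInstrInfo code) that the two offsets must be consecutive:

    #include <cstdint>
    #include <optional>

    // offset0/offset1 are in element-sized units; summarize the pair as a
    // single access only when the slots are adjacent, reporting the byte
    // offset EltSizeBytes * offset0.
    std::optional<int64_t> combinedByteOffset(uint8_t Offset0, uint8_t Offset1,
                                              unsigned EltSizeBytes) {
      if (Offset1 <= Offset0 || Offset1 - Offset0 != 1)
        return std::nullopt; // non-adjacent slots: no single base+offset form
      return static_cast<int64_t>(EltSizeBytes) * Offset0;
    }

    int main() {
      // e.g. a two-slot read of 4-byte elements at slots 2 and 3 -> byte offset 8
      if (auto Off = combinedByteOffset(2, 3, 4))
        return (*Off == 8) ? 0 : 1;
      return 1;
    }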
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64InstrInfo.h |
      96  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
      100  bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
|
D | AArch64InstrInfo.cpp |
      1538  MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,  in getMemOpBaseRegImmOfs() argument
      1540  switch (LdSt.getOpcode()) {  in getMemOpBaseRegImmOfs()
      1568  return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);  in getMemOpBaseRegImmOfs()
      1573  MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,  in getMemOpBaseRegImmOfsWidth() argument
      1575  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");  in getMemOpBaseRegImmOfsWidth()
      1577  if (LdSt.getNumExplicitOperands() == 3) {  in getMemOpBaseRegImmOfsWidth()
      1579  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())  in getMemOpBaseRegImmOfsWidth()
      1581  } else if (LdSt.getNumExplicitOperands() == 4) {  in getMemOpBaseRegImmOfsWidth()
      1583  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||  in getMemOpBaseRegImmOfsWidth()
      1584  !LdSt.getOperand(3).isImm())  in getMemOpBaseRegImmOfsWidth()
      [all …]
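The AArch64 excerpt distinguishes two operand shapes: three explicit operands for a plain load/store (data, base register, immediate offset) and four for a paired form (two data registers, base, offset). The point of recovering a (BaseReg, Offset, Width) triple through hooks like these is to let target-independent passes reason about adjacency of memory accesses, for example when clustering loads/stores during scheduling or forming paired accesses. As a rough, hypothetical illustration (not the in-tree pass), a helper built on such triples could look like:

    #include <cstdint>

    // The triple a getMemOpBaseRegImmOfsWidth-style hook recovers for a
    // load or store.
    struct MemOpInfo {
      unsigned BaseReg;
      int64_t Offset;  // byte offset from BaseReg
      unsigned Width;  // access size in bytes
    };

    // Illustrative helper (not LLVM code): two accesses off the same base
    // register are back-to-back when one ends exactly where the other begins.
    bool accessesAreAdjacent(const MemOpInfo &A, const MemOpInfo &B) {
      if (A.BaseReg != B.BaseReg)
        return false;
      return A.Offset + static_cast<int64_t>(A.Width) == B.Offset ||
             B.Offset + static_cast<int64_t>(B.Width) == A.Offset;
    }

    int main() {
      MemOpInfo Lo{/*BaseReg=*/29, /*Offset=*/16, /*Width=*/8};
      MemOpInfo Hi{/*BaseReg=*/29, /*Offset=*/24, /*Width=*/8};
      // Adjacent 8-byte accesses: the kind of pair an LDP/STP-style
      // optimization would want to find.
      return accessesAreAdjacent(Lo, Hi) ? 0 : 1;
    }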
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonInstrInfo.h |
      192  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
|
D | HexagonInstrInfo.cpp |
      2942  bool HexagonInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt,  in getMemOpBaseRegImmOfs() argument
      2947  BaseReg = getBaseAndOffset(&LdSt, OffsetVal, AccessSize);  in getMemOpBaseRegImmOfs()
|
/external/llvm/lib/Target/X86/ |
D | X86InstrInfo.h |
      312  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
|
/external/swiftshader/third_party/subzero/src/ |
D | IceTargetLoweringMIPS32.h |
      865  OperandMIPS32Mem *formAddressingMode(Type Ty, Cfg *Func, const Inst *LdSt,
|
D | IceTargetLoweringARM32.h |
      1293  OperandARM32Mem *formAddressingMode(Type Ty, Cfg *Func, const Inst *LdSt,
|
D | IceTargetLoweringMIPS32.cpp |
      5363  const Inst *LdSt,  in formAddressingMode() argument
      5373  LdSt->dumpDecorated(Func);  in formAddressingMode()
|
D | IceTargetLoweringARM32.cpp |
      5763  const Inst *LdSt,  in formAddressingMode() argument
      5776  LdSt->dumpDecorated(Func);  in formAddressingMode()
|
/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
D | ARMISelDAGToDAG.cpp |
      941  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);  in SelectAddrMode6Offset() local
      942  ISD::MemIndexedMode AM = LdSt->getAddressingMode();  in SelectAddrMode6Offset()
      947  if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())  in SelectAddrMode6Offset()
|
/external/llvm/lib/Target/ARM/ |
D | ARMISelDAGToDAG.cpp |
      1133  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);  in SelectAddrMode6Offset() local
      1134  ISD::MemIndexedMode AM = LdSt->getAddressingMode();  in SelectAddrMode6Offset()
      1139  if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())  in SelectAddrMode6Offset()
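In both copies of SelectAddrMode6Offset (the in-tree ARM backend and the older snapshot vendored under SwiftShader), the comparison suggests a constant post-increment is only accepted when it advances the pointer by exactly the width of the access, i.e. the increment in bytes times 8 equals the memory type's size in bits; otherwise the generic register-offset form must be used. A tiny stand-alone restatement of that predicate (function and parameter names are illustrative, not from the backend):

    #include <cstdint>

    // Mirrors the condition in SelectAddrMode6Offset: a fixed post-increment
    // can be folded into the vector load/store addressing mode only when it
    // steps by exactly the access width.
    bool incrementMatchesAccessWidth(uint64_t IncrementBytes, uint64_t MemSizeBits) {
      return IncrementBytes * 8 == MemSizeBits;
    }

    int main() {
      // A 128-bit NEON access post-incremented by 16 bytes qualifies;
      // an 8-byte increment on the same access does not.
      return (incrementMatchesAccessWidth(16, 128) &&
              !incrementMatchesAccessWidth(8, 128)) ? 0 : 1;
    }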
|