/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86LegalizerInfo.cpp |
    76    for (unsigned MemOp : {G_LOAD, G_STORE})  in X86LegalizerInfo()
    77    setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,  in X86LegalizerInfo()
    131   for (unsigned MemOp : {G_LOAD, G_STORE}) {  in setLegalizerInfo32bit()
    133   setAction({MemOp, Ty}, Legal);  in setLegalizerInfo32bit()
    136   setAction({MemOp, 1, p0}, Legal);  in setLegalizerInfo32bit()
    222   for (unsigned MemOp : {G_LOAD, G_STORE})  in setLegalizerInfo64bit()
    223   setAction({MemOp, s64}, Legal);  in setLegalizerInfo64bit()
    297   for (unsigned MemOp : {G_LOAD, G_STORE})  in setLegalizerInfoSSE1()
    299   setAction({MemOp, Ty}, Legal);  in setLegalizerInfoSSE1()
    387   for (unsigned MemOp : {G_LOAD, G_STORE})  in setLegalizerInfoAVX()
    [all …]
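The hits above show LLVM 10's legacy per-opcode LegalizerInfo API: the target loops over G_LOAD/G_STORE and marks the value and pointer operand types it supports as Legal. A minimal, hypothetical sketch of that pattern (the type set and pointer width are illustrative, not the real X86 rules):

    #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
    #include "llvm/CodeGen/TargetOpcodes.h"
    #include "llvm/Support/LowLevelTypeImpl.h"

    using namespace llvm;

    // Hypothetical target legalizer; only the load/store rules are shown.
    struct DemoLegalizerInfo : public LegalizerInfo {
      DemoLegalizerInfo() {
        using namespace TargetOpcode;
        using namespace LegalizeActions;

        const LLT p0  = LLT::pointer(0, /*SizeInBits=*/32);
        const LLT s8  = LLT::scalar(8);
        const LLT s16 = LLT::scalar(16);
        const LLT s32 = LLT::scalar(32);

        for (unsigned MemOp : {G_LOAD, G_STORE}) {
          // Operand 0 is the loaded/stored value; each listed type is legal.
          for (auto Ty : {s8, s16, s32, p0})
            setAction({MemOp, Ty}, Legal);
          // Operand 1 is the pointer operand.
          setAction({MemOp, 1, p0}, Legal);
        }

        // Real targets finish with computeTables() (and verify()).
        computeTables();
      }
    };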
|
D | X86InstrFoldTables.h |
    93    const X86MemoryFoldTableEntry *lookupUnfoldTable(unsigned MemOp);
|
D | X86DomainReassignment.cpp |
    567   int MemOp = X86II::getMemoryOperandNo(Desc.TSFlags);  in buildClosure() local
    568   if (MemOp != -1)  in buildClosure()
    569   MemOp += X86II::getOperandBias(Desc);  in buildClosure()
    571   if (OpIdx == MemOp) {  in buildClosure()
|
D | X86InstrAVX512.td |
    65    X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
    498   (ins To.RC:$src1, From.MemOp:$src2, u8imm:$src3),
    791   (ins To.MemOp:$dst, From.RC:$src1, u8imm:$idx),
    801   (ins To.MemOp:$dst, To.KRCWM:$mask,
    1373  (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
    1387  (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
    1760  (ins _.RC:$src2, _.MemOp:$src3),
    1878  (ins IdxVT.RC:$src2, _.MemOp:$src3),
    1973  (ins _.RC:$src1, _.MemOp:$src2),
    1979  (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
    [all …]
|
D | X86InstructionSelector.cpp |
    512   auto &MemOp = **I.memoperands_begin();  in selectLoadStoreOp() local
    513   if (MemOp.isAtomic()) {  in selectLoadStoreOp()
    519   if (!MemOp.isUnordered()) {  in selectLoadStoreOp()
    523   if (MemOp.getAlignment() < Ty.getSizeInBits()/8) {  in selectLoadStoreOp()
    529   unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());  in selectLoadStoreOp()
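selectLoadStoreOp() above illustrates the usual GlobalISel gate on a G_LOAD/G_STORE memoperand before picking a target opcode. A hedged sketch of that check, using the LLVM 10 MachineMemOperand calls shown in the hits (the helper name and the naturally-aligned-atomics policy are illustrative):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineMemOperand.h"

    using namespace llvm;

    // SizeInBits would come from the LLT of the loaded/stored value.
    static bool selectableAsPlainLoadStore(const MachineInstr &I,
                                           unsigned SizeInBits) {
      // G_LOAD/G_STORE carry exactly one memoperand after IR translation.
      const MachineMemOperand &MemOp = **I.memoperands_begin();
      if (MemOp.isAtomic()) {
        // Only unordered atomics can go through the ordinary load/store path.
        if (!MemOp.isUnordered())
          return false;
        // And only when naturally aligned (getAlignment() is in bytes in LLVM 10).
        if (MemOp.getAlignment() < SizeInBits / 8)
          return false;
      }
      return true;
    }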
|
D | X86InstrSSE.td |
    4926  multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
    4933  def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
    4939  X86MemOperand MemOp, X86MemOperand MemYOp,
    4941  defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128,
    4944  defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
    4953  multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
    4956  MemOp, MemYOp, prd>;
    4959  MemOp, MemYOp, prd>;
    6689  X86MemOperand MemOp = i128mem> {
    6698  (ins RC:$src1, MemOp:$src2), "",
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | MachineLICM.cpp |
    419   for (const MachineMemOperand *MemOp : MI->memoperands()) {  in InstructionStoresToFI() local
    420   if (!MemOp->isStore() || !MemOp->getPseudoValue())  in InstructionStoresToFI()
    423   dyn_cast<FixedStackPseudoSourceValue>(MemOp->getPseudoValue())) {  in InstructionStoresToFI()
    933   for (MachineMemOperand *MemOp : MI.memoperands())  in mayLoadFromGOTOrConstantPool()
    934   if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())  in mayLoadFromGOTOrConstantPool()
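The MachineLICM hits walk an instruction's memoperands and inspect their pseudo source values. A minimal sketch of the same walk, mirroring InstructionStoresToFI() (the free-standing helper name is hypothetical):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineMemOperand.h"
    #include "llvm/CodeGen/PseudoSourceValue.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    // True if MI stores to the fixed stack slot with frame index FI.
    static bool storesToFrameIndex(const MachineInstr &MI, int FI) {
      for (const MachineMemOperand *MemOp : MI.memoperands()) {
        if (!MemOp->isStore() || !MemOp->getPseudoValue())
          continue;
        if (const auto *PSV =
                dyn_cast<FixedStackPseudoSourceValue>(MemOp->getPseudoValue()))
          if (PSV->getFrameIndex() == FI)
            return true;
      }
      return false;
    }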
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/XCore/ |
D | XCoreISelDAGToDAG.cpp |
    153   MachineMemOperand *MemOp =  in Select() local
    156   CurDAG->setNodeMemRefs(cast<MachineSDNode>(node), {MemOp});  in Select()
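This two-liner is the pattern that recurs through the XCore, ARM, Hexagon and AArch64 DAG selectors listed in this section: a hand-built MachineSDNode does not inherit the memory operand of the node it replaces, so the selector copies it over with setNodeMemRefs(). A small sketch of that transfer (the function and parameter names are placeholders):

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    using namespace llvm;

    // N is the original memory node; Result is the machine node that replaces it.
    static void transferMemOperand(SelectionDAG &CurDAG, SDNode *N,
                                   SDNode *Result) {
      MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
      CurDAG.setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});
    }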
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/GlobalISel/ |
D | CombinerHelper.cpp |
    1301  const MachineMemOperand *MemOp = *MMOIt;  in tryCombineMemCpyFamily() local
    1302  bool IsVolatile = MemOp->isVolatile();  in tryCombineMemCpyFamily()
    1307  unsigned DstAlign = MemOp->getBaseAlignment();  in tryCombineMemCpyFamily()
    1315  MemOp = *(++MMOIt);  in tryCombineMemCpyFamily()
    1316  SrcAlign = MemOp->getBaseAlignment();  in tryCombineMemCpyFamily()
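tryCombineMemCpyFamily() reads the destination memoperand first and the source memoperand second. A hedged sketch of that bookkeeping, assuming LLVM 10's getBaseAlignment() as in the hits (the MemTransferInfo struct and helper name are illustrative, not LLVM types):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineMemOperand.h"
    #include <iterator>

    using namespace llvm;

    // Volatility and base alignments of a memcpy/memmove/memset-family
    // instruction, recovered from its attached memoperands.
    struct MemTransferInfo {
      bool IsVolatile = false;
      uint64_t DstAlign = 0;
      uint64_t SrcAlign = 0;
    };

    static MemTransferInfo readMemTransferInfo(const MachineInstr &MI) {
      MemTransferInfo Info;
      auto MMOIt = MI.memoperands_begin();
      const MachineMemOperand *MemOp = *MMOIt;   // destination comes first
      Info.IsVolatile = MemOp->isVolatile();
      Info.DstAlign = MemOp->getBaseAlignment();
      if (std::next(MMOIt) != MI.memoperands_end()) {
        MemOp = *(++MMOIt);                      // source (memset has none)
        Info.SrcAlign = MemOp->getBaseAlignment();
      }
      return Info;
    }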
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMISelDAGToDAG.cpp |
    1517  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();  in transferMemOperands() local
    1518  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});  in transferMemOperands()
    2124  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectVLD() local
    2125  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLd), {MemOp});  in SelectVLD()
    2162  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectVST() local
    2251  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VSt), {MemOp});  in SelectVST()
    2275  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStA), {MemOp});  in SelectVST()
    2294  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStB), {MemOp});  in SelectVST()
    2313  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectVLDSTLane() local
    2404  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdLn), {MemOp});  in SelectVLDSTLane()
    [all …]
|
D | ARMInstructionSelector.cpp |
    1082  const auto &MemOp = **I.memoperands_begin();  in select() local
    1083  if (MemOp.isAtomic()) {  in select()
|
D | ARMLoadStoreOptimizer.cpp |
    2124  for (MachineInstr *MemOp : MemOps)  in IsSafeAndProfitableToMove()
    2125  if (I->mayAlias(AA, *MemOp, /*UseTBAA*/ false))  in IsSafeAndProfitableToMove()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonISelDAGToDAG.cpp |
    130   MachineMemOperand *MemOp = LD->getMemOperand();  in SelectIndexedLoad() local
    161   CurDAG->setNodeMemRefs(L, {MemOp});  in SelectIndexedLoad()
    172   CurDAG->setNodeMemRefs(L, {MemOp});  in SelectIndexedLoad()
    346   MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(IntN)->getMemOperand();  in SelectBrevLdIntrinsic() local
    347   CurDAG->setNodeMemRefs(Res, {MemOp});  in SelectBrevLdIntrinsic()
    526   MachineMemOperand *MemOp = ST->getMemOperand();  in SelectIndexedStore() local
    537   CurDAG->setNodeMemRefs(S, {MemOp});  in SelectIndexedStore()
    544   CurDAG->setNodeMemRefs(S, {MemOp});  in SelectIndexedStore()
|
D | HexagonISelDAGToDAGHVX.cpp |
    2156  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectV65GatherPred() local
    2157  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});  in SelectV65GatherPred()
    2193  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectV65Gather() local
    2194  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});  in SelectV65Gather()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/SystemZ/AsmParser/ |
D | SystemZAsmParser.cpp |
    114   struct MemOp {  struct in __anonee5be2880111::SystemZOperand
    138   MemOp Mem;
    279   const MemOp& getMem() const {  in getMem()
    673   const MemOp &Op = getMem();  in print()
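SystemZ, X86 (X86Operand.h below), Lanai and Sparc all keep a small MemOp struct inside their MCParsedAsmOperand subclass to hold a parsed memory reference. A stripped-down, hypothetical sketch of that shape; the class, kinds and field names are illustrative, not the real SystemZ layout:

    #include "llvm/MC/MCExpr.h"
    #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
    #include <cassert>

    namespace {

    // Hypothetical operand class; only the memory-operand plumbing is shown.
    class DemoAsmOperand : public llvm::MCParsedAsmOperand {
      enum KindTy { KindReg, KindImm, KindMem } Kind = KindReg;

      struct MemOp {
        unsigned Base;            // base register number
        unsigned Index;           // index register, 0 if absent
        const llvm::MCExpr *Disp; // displacement expression
      };

      union {
        unsigned Reg;
        const llvm::MCExpr *Imm;
        MemOp Mem;                // valid only when Kind == KindMem
      };

    public:
      const MemOp &getMem() const {
        assert(Kind == KindMem && "not a memory operand");
        return Mem;
      }
      // The MCParsedAsmOperand overrides (isReg(), isMem(), print(), ...) and
      // the addXXXOperands() callbacks used by the generated matcher are omitted.
    };

    } // end anonymous namespace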
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/AsmParser/ |
D | X86Operand.h |
    59    struct MemOp {  struct
    77    struct MemOp Mem;
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64ISelDAGToDAG.cpp |
    1299  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectLoad() local
    1300  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});  in SelectLoad()
    1351  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectStore() local
    1352  CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});  in SelectStore()
    1530  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectStoreLane() local
    1531  CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});  in SelectStoreLane()
    1564  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();  in SelectPostStoreLane() local
    1565  CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});  in SelectPostStoreLane()
    2872  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();  in SelectCMP_SWAP() local
    2873  CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});  in SelectCMP_SWAP()
    [all …]
|
D | AArch64InstructionSelector.cpp |
    1826  auto &MemOp = **I.memoperands_begin();  in select() local
    1827  if (MemOp.isAtomic()) {  in select()
    1830  if (MemOp.getOrdering() == AtomicOrdering::Acquire &&  in select()
    1831  MemOp.getSize() == 1) {  in select()
    1838  unsigned MemSizeInBits = MemOp.getSize() * 8;  in select()
    2184  const MachineMemOperand *MemOp = *LoadMI->memoperands_begin();  in select() local
    2185  unsigned BytesLoaded = MemOp->getSize();  in select()
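The AArch64 hits carve out one atomic case the selector still handles directly: a byte-sized acquire load; every other atomic access bails out to separate handling. A sketch of that test, using only the getOrdering()/getSize() calls visible above (the helper name and policy framing are illustrative):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineMemOperand.h"
    #include "llvm/Support/AtomicOrdering.h"

    using namespace llvm;

    static bool canSelectAtomicLoadHere(const MachineInstr &I) {
      const MachineMemOperand &MemOp = **I.memoperands_begin();
      if (!MemOp.isAtomic())
        return true;   // ordinary load, no restriction
      // Byte-sized acquire loads get a dedicated opcode; other atomics bail out.
      return MemOp.getOrdering() == AtomicOrdering::Acquire &&
             MemOp.getSize() == 1;
    }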
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/MSP430/ |
D | MSP430ISelLowering.cpp |
    861   SDValue MemOp;  in LowerCCCCallTo() local
    866   MemOp = DAG.getMemcpy(Chain, dl, PtrOff, Arg, SizeNode,  in LowerCCCCallTo()
    874   MemOp = DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());  in LowerCCCCallTo()
    877   MemOpChains.push_back(MemOp);  in LowerCCCCallTo()
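In LowerCCCCallTo(), each argument passed on the stack becomes either a memcpy (for byval aggregates) or a plain store, and the resulting value is appended to MemOpChains. A hedged sketch of that branch, assuming LLVM 10's getMemcpy() signature; the helper name, value type, alignment and flag values are illustrative:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/SelectionDAG.h"

    using namespace llvm;

    static void addOutgoingArg(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                               SDValue Arg, SDValue PtrOff, bool IsByVal,
                               uint64_t ByValSize,
                               SmallVectorImpl<SDValue> &MemOpChains) {
      SDValue MemOp;
      if (IsByVal) {
        // Byval aggregates are copied to their stack slot with a memcpy.
        SDValue SizeNode = DAG.getConstant(ByValSize, dl, MVT::i16);
        MemOp = DAG.getMemcpy(Chain, dl, PtrOff, Arg, SizeNode,
                              /*Align=*/2, /*isVol=*/false,
                              /*AlwaysInline=*/true, /*isTailCall=*/false,
                              MachinePointerInfo(), MachinePointerInfo());
      } else {
        // Everything else is a plain store to the outgoing-argument area.
        MemOp = DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
      }
      MemOpChains.push_back(MemOp);
    }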
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Lanai/AsmParser/ |
D | LanaiAsmParser.cpp |
    128   struct MemOp {  struct
    139   struct MemOp Mem;
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/src/ |
D | IceInstX8632.cpp |
    2046  if (auto *MemOp = llvm::dyn_cast<X86OperandMem>(this->getSrc(0))) {  in deoptToAddOrNull() local
    2048  MemOp->getBase()->getRegNum() == this->getDest()->getRegNum() &&  in deoptToAddOrNull()
    2049  MemOp->getIndex() == nullptr && MemOp->getShift() == 0) {  in deoptToAddOrNull()
    2051  MemOp->getOffset());  in deoptToAddOrNull()
|
D | IceInstX8664.cpp |
    2048  if (auto *MemOp = llvm::dyn_cast<X86OperandMem>(this->getSrc(0))) {  in deoptToAddOrNull() local
    2050  MemOp->getBase()->getRegNum() == this->getDest()->getRegNum() &&  in deoptToAddOrNull()
    2051  MemOp->getIndex() == nullptr && MemOp->getShift() == 0) {  in deoptToAddOrNull()
    2053  MemOp->getOffset());  in deoptToAddOrNull()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Sparc/AsmParser/ |
D | SparcAsmParser.cpp |
    239   struct MemOp {  struct in __anon17170b7c0211::SparcOperand
    249   struct MemOp Mem;
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SILoadStoreOptimizer.cpp |
    630   static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,  in canMoveInstsAcrossMemOp() argument
    633   assert(MemOp.mayLoadOrStore());  in canMoveInstsAcrossMemOp()
    638   if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))  in canMoveInstsAcrossMemOp()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/MIRParser/ |
D | MIParser.cpp |
    1001  MachineMemOperand *MemOp = nullptr;  in parse() local
    1002  if (parseMachineMemoryOperand(MemOp))  in parse()
    1004  MemOperands.push_back(MemOp);  in parse()
|