/external/llvm/lib/Target/AMDGPU/ |
D | SILoadStoreOptimizer.cpp |
      65  static bool offsetsCanBeCombined(unsigned Offset0,
     130  bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,  in offsetsCanBeCombined() argument
     135  if (Offset0 == Offset1)  in offsetsCanBeCombined()
     139  if ((Offset0 % Size != 0) || (Offset1 % Size != 0))  in offsetsCanBeCombined()
     142  unsigned EltOffset0 = Offset0 / Size;  in offsetsCanBeCombined()
     181  unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;  in findMatchingDSInst() local
     185  if (offsetsCanBeCombined(Offset0, Offset1, EltSize))  in findMatchingDSInst()
     205  unsigned Offset0  in mergeRead2Pair() local
     210  unsigned NewOffset0 = Offset0 / EltSize;  in mergeRead2Pair()
     301  unsigned Offset0  in mergeWrite2Pair() local
     [all …]
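The matches above are the legality test this pass runs before merging two DS loads or stores into a single ds_read2/ds_write2. A minimal sketch of that check, reconstructed from the lines shown; the final 8-bit range test is an assumption about the encodable offset field and is not visible in the excerpt:

    // Sketch only: mirrors the checks visible in the excerpt above. Size is
    // the per-element size in bytes of the two accesses being paired.
    static bool offsetsCanBeCombinedSketch(unsigned Offset0, unsigned Offset1,
                                           unsigned Size) {
      // A paired op cannot access the same element twice.
      if (Offset0 == Offset1)
        return false;

      // Both byte offsets must be element-aligned, because the encoded
      // offsets count elements rather than bytes.
      if ((Offset0 % Size != 0) || (Offset1 % Size != 0))
        return false;

      unsigned EltOffset0 = Offset0 / Size;
      unsigned EltOffset1 = Offset1 / Size;

      // Assumption: each encoded offset must fit the 8-bit offset field of
      // the paired DS instruction.
      return EltOffset0 <= 255 && EltOffset1 <= 255;
    }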
|
D | AMDGPUInstrInfo.cpp |
      51  int64_t Offset0, int64_t Offset1,  in shouldScheduleLoadsNear() argument
      53  assert(Offset1 > Offset0 &&  in shouldScheduleLoadsNear()
      59  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);  in shouldScheduleLoadsNear()
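This entry is the load-clustering heuristic the scheduler queries; its entire logic is visible in the matched lines. Restated as a standalone sketch:

    #include <cassert>
    #include <cstdint>

    // Cluster at most 16 loads, and only when their offsets lie within 64
    // bytes of each other; Offset1 is expected to be the larger offset, as
    // the assert in the excerpt requires.
    static bool shouldScheduleLoadsNearSketch(int64_t Offset0, int64_t Offset1,
                                              unsigned NumLoads) {
      assert(Offset1 > Offset0 && "second offset expected to be larger");
      return NumLoads <= 16 && (Offset1 - Offset0) < 64;
    }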
|
D | SIInstrInfo.cpp |
      94  int64_t &Offset0,  in areLoadsFromSameBasePtr() argument
     127  Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();  in areLoadsFromSameBasePtr()
     151  Offset0 = Load0Offset->getZExtValue();  in areLoadsFromSameBasePtr()
     185  Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();  in areLoadsFromSameBasePtr()
     231  uint8_t Offset0 = Offset0Imm->getImm();  in getMemOpBaseRegImmOfs() local
     234  if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {  in getMemOpBaseRegImmOfs()
     253  Offset = EltSize * Offset0;  in getMemOpBaseRegImmOfs()
    1344  int64_t Offset0, Offset1;  in checkInstOffsetsDoNotOverlap() local
    1346  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&  in checkInstOffsetsDoNotOverlap()
    1356  offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {  in checkInstOffsetsDoNotOverlap()
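Two things are visible here: getMemOpBaseRegImmOfs treats a DS read2/write2 whose two 8-bit offsets are consecutive (Offset1 - Offset0 == 1) as one access at byte offset EltSize * Offset0, and checkInstOffsetsDoNotOverlap feeds the recovered base/offset pairs into an interval test. A sketch of that test, under the assumption that it is the usual half-open-range comparison:

    // Two accesses [OffsetA, OffsetA + WidthA) and [OffsetB, OffsetB + WidthB)
    // off the same base register cannot alias if the lower interval ends at
    // or before the start of the higher one.
    static bool offsetsDoNotOverlapSketch(int WidthA, int OffsetA,
                                          int WidthB, int OffsetB) {
      int LowOffset  = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth   = (LowOffset == OffsetA) ? WidthA : WidthB;
      return LowOffset + LowWidth <= HighOffset;
    }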
|
D | AMDGPUISelDAGToDAG.cpp |
      90  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
     719  SDValue &Offset0,  in SelectDS64Bit4ByteAligned() argument
     732  Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
     758  Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
     775  Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
     783  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
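SelectDS64Bit4ByteAligned splits the address of a 4-byte-aligned 64-bit DS access into a base pointer plus two i8 dword offsets (DWordOffset0 and the following dword in the paths not shown). A hedged sketch of that offset arithmetic; the names and the standalone signature are illustrative, not the SelectionDAG API:

    #include <cstdint>

    // Illustrative result type: the two 8-bit dword-offset operands of a
    // paired DS instruction.
    struct DSPairOffsets {
      uint8_t Offset0; // first dword slot
      uint8_t Offset1; // second dword slot, one dword after the first
    };

    // Returns false if the byte offset cannot be expressed as two in-range
    // 8-bit dword offsets.
    static bool splitDS64ByteOffset(uint64_t ByteOffset, DSPairOffsets &Out) {
      if (ByteOffset % 4 != 0)          // must be 4-byte aligned
        return false;
      uint64_t DWord0 = ByteOffset / 4; // dword index of the low half
      uint64_t DWord1 = DWord0 + 1;     // high half is the next dword
      if (DWord1 > 255)                 // both must fit the 8-bit fields
        return false;
      Out.Offset0 = static_cast<uint8_t>(DWord0);
      Out.Offset1 = static_cast<uint8_t>(DWord1);
      return true;
    }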
|
D | SIInstrInfo.td | 542 def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>;
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/ |
D | SILoadStoreOptimizer.cpp |
      92  unsigned Offset0;  member
     250  if (CI.Offset0 == CI.Offset1)  in offsetsCanBeCombined()
     254  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))  in offsetsCanBeCombined()
     257  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;  in offsetsCanBeCombined()
     275  CI.Offset0 = EltOffset0 / 64;  in offsetsCanBeCombined()
     283  CI.Offset0 = EltOffset0;  in offsetsCanBeCombined()
     290  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);  in offsetsCanBeCombined()
     293  CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;  in offsetsCanBeCombined()
     300  CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;  in offsetsCanBeCombined()
     428  CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();  in findMatchingInst()
     [all …]
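Compared with the older copy above, this version keeps both offsets in a CombineInfo record and, when the raw element offsets do not both fit the 8-bit fields, re-bases them against std::min(Offset0, Offset1) and may switch to the stride-64 (st64) encoding. A sketch of that re-basing under those assumptions; CombineInfoSketch carries only the fields used here and the st64 division is omitted:

    #include <algorithm>

    // Stand-in for the pass's CombineInfo; only the fields used below.
    struct CombineInfoSketch {
      unsigned EltSize;  // bytes per element (4 or 8)
      unsigned Offset0;  // encoded offset of the first access
      unsigned Offset1;  // encoded offset of the second access
      unsigned BaseOff;  // byte offset folded into the base register
    };

    // EltOffset0/EltOffset1 are the byte offsets divided by EltSize, as in
    // the excerpt. Returns false if the pair cannot be encoded even after
    // re-basing.
    static bool rebaseOffsetsSketch(unsigned EltOffset0, unsigned EltOffset1,
                                    CombineInfoSketch &CI) {
      // Plain form: both element offsets already fit the 8-bit fields.
      if (EltOffset0 <= 255 && EltOffset1 <= 255) {
        CI.BaseOff = 0;
        CI.Offset0 = EltOffset0;
        CI.Offset1 = EltOffset1;
        return true;
      }

      // Re-based form: fold the smaller byte offset into the base register
      // and encode only the remainders. (The st64 path in the excerpt also
      // divides the remainders by 64; omitted here.)
      CI.BaseOff = std::min(EltOffset0, EltOffset1) * CI.EltSize;
      unsigned Rem0 = EltOffset0 - CI.BaseOff / CI.EltSize;
      unsigned Rem1 = EltOffset1 - CI.BaseOff / CI.EltSize;
      if (Rem0 > 255 || Rem1 > 255)
        return false;
      CI.Offset0 = Rem0;
      CI.Offset1 = Rem1;
      return true;
    }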
|
D | SIInstrInfo.cpp |
     151  int64_t &Offset0,  in areLoadsFromSameBasePtr() argument
     184  Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();  in areLoadsFromSameBasePtr()
     213  Offset0 = Load0Offset->getZExtValue();  in areLoadsFromSameBasePtr()
     247  Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();  in areLoadsFromSameBasePtr()
     293  uint8_t Offset0 = Offset0Imm->getImm();  in getMemOpBaseRegImmOfs() local
     296  if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {  in getMemOpBaseRegImmOfs()
     315  Offset = EltSize * Offset0;  in getMemOpBaseRegImmOfs()
     467  int64_t Offset0, int64_t Offset1,  in shouldScheduleLoadsNear() argument
     469  assert(Offset1 > Offset0 &&  in shouldScheduleLoadsNear()
     475  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);  in shouldScheduleLoadsNear()
     [all …]
|
D | AMDGPUISelDAGToDAG.cpp |
     117  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
     905  SDValue &Offset0,  in SelectDS64Bit4ByteAligned() argument
     918  Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
     947  Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
     964  Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
     975  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
|
D | SIInstrInfo.h | 171 bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,
|
D | SIInstrInfo.td | 735 def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>;
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Hexagon/ |
D | HexagonSubtarget.cpp |
     279  int Offset0;  in apply() local
     281  unsigned Base0 = HII.getBaseAndOffset(L0, Offset0, Size0);  in apply()
     299  if (((Offset0 ^ Offset1) & 0x18) != 0)  in apply()
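The Hexagon entry is a scheduler DAG mutation: it recovers the base register and byte offset of two memory ops via HII.getBaseAndOffset and then compares bits 3 and 4 of the two offsets (mask 0x18). A tiny sketch of that predicate; reading the mask as a slot/bank selection field is an assumption, since the excerpt only shows the comparison:

    // True when the two byte offsets differ in bit 3 or bit 4 (mask 0x18),
    // i.e. when they fall into different 8-byte slots of a 32-byte block.
    static bool offsetsDifferInBits3And4(int Offset0, int Offset1) {
      return ((Offset0 ^ Offset1) & 0x18) != 0;
    }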
|
/external/llvm/test/CodeGen/SPARC/ |
D | 64abi.ll |
     504  ; HARD-DAG: std %f4, [%sp+[[Offset0:[0-9]+]]]
     506  ; HARD-DAG: ldx [%sp+[[Offset0]]], %o2
     519  ; HARD: st %f1, [%fp+[[Offset0:[0-9]+]]]
     524  ; HARD: ld [%fp+[[Offset0]]], %f1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SPARC/ |
D | 64abi.ll |
     502  ; HARD-DAG: std %f4, [%sp+[[Offset0:[0-9]+]]]
     504  ; HARD-DAG: ldx [%sp+[[Offset0]]], %o2
     517  ; HARD: st %f1, [%fp+[[Offset0:[0-9]+]]]
     522  ; HARD: ld [%fp+[[Offset0]]], %f1
|
/external/llvm/lib/CodeGen/SelectionDAG/ |
D | DAGCombiner.cpp |
    9847  const APInt &Offset0 = CN->getAPIntValue();  in CombineToPreIndexedLoadStore() local
    9857  APInt CNV = Offset0;  in CombineToPreIndexedLoadStore()
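In CombineToPreIndexedLoadStore, Offset0 is the constant used by another user of the pointer being pre-indexed, and the APInt copy CNV is the starting point for rewriting that user against the new pointer value. A hedged sketch of the underlying arithmetic (the real code manipulates APInt operands of SDNodes):

    #include <cstdint>

    // After a load/store is converted to pre-indexed form, the pointer node
    // produces Base + Step (pre-increment) or Base - Step (pre-decrement).
    // Another user that computed Base + Offset0 must then be rewritten as
    // NewPtr + Result, where Result is:
    static int64_t rebaseOtherUse(int64_t Offset0, int64_t Step,
                                  bool PreIncrement) {
      return PreIncrement ? Offset0 - Step : Offset0 + Step;
    }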
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/SelectionDAG/ |
D | DAGCombiner.cpp |
   12396  const APInt &Offset0 = CN->getAPIntValue();  in CombineToPreIndexedLoadStore() local
   12406  APInt CNV = Offset0;  in CombineToPreIndexedLoadStore()
|