/third_party/json/tests/thirdparty/Fuzzer/test/ |
D | CustomCrossOverTest.cpp |
    49  size_t Offset1 = 0;  in LLVMFuzzerCustomCrossOver() local
    50  size_t Len1 = R() % (Size1 - Offset1);  in LLVMFuzzerCustomCrossOver()
    58  memcpy(Out, Data1 + Offset1, Len1);  in LLVMFuzzerCustomCrossOver()
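The hits above come from libFuzzer's custom-crossover test, which splices a random slice of one input onto a slice of another. A condensed sketch of such a hook, using libFuzzer's LLVMFuzzerCustomCrossOver entry point; the splicing policy here is simplified relative to the actual test:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <random>

    extern "C" size_t LLVMFuzzerCustomCrossOver(const uint8_t *Data1, size_t Size1,
                                                const uint8_t *Data2, size_t Size2,
                                                uint8_t *Out, size_t MaxOutSize,
                                                unsigned Seed) {
      if (Size1 == 0 || Size2 == 0 || MaxOutSize == 0)
        return 0;
      std::minstd_rand R(Seed);
      size_t Offset1 = 0;
      size_t Len1 = R() % (Size1 - Offset1);  // random prefix of input 1
      size_t Len2 = R() % Size2;              // random prefix of input 2
      if (Len1 + Len2 > MaxOutSize)           // respect the output budget
        return 0;
      std::memcpy(Out, Data1 + Offset1, Len1);
      std::memcpy(Out + Len1, Data2, Len2);
      return Len1 + Len2;
    }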
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/ |
D | ScheduleDAGSDNodes.cpp |
   240  int64_t Offset1, Offset2;  in ClusterNeighboringLoads() local
   241  if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||  in ClusterNeighboringLoads()
   242  Offset1 == Offset2 ||  in ClusterNeighboringLoads()
   248  if (O2SMap.insert(std::make_pair(Offset1, Base)).second)  in ClusterNeighboringLoads()
   249  Offsets.push_back(Offset1);  in ClusterNeighboringLoads()
   252  if (Offset2 < Offset1)  in ClusterNeighboringLoads()
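The ScheduleDAGSDNodes hits above trace the generic load-clustering walk: offsets that share a base pointer are deduplicated through a map keyed by offset, collected, and sorted before clustering. A minimal standalone sketch of that pattern; the Load struct and function name are illustrative, not LLVM API:

    #include <algorithm>
    #include <cstdint>
    #include <map>
    #include <vector>

    struct Load { int64_t Offset; };  // stand-in for a load SDNode

    // Deduplicate by offset, then return candidates in address order,
    // mirroring the O2SMap/Offsets bookkeeping in ClusterNeighboringLoads.
    std::vector<Load *> clusterCandidates(const std::vector<Load *> &Users) {
      std::map<int64_t, Load *> O2SMap;
      std::vector<int64_t> Offsets;
      for (Load *L : Users)
        if (O2SMap.insert({L->Offset, L}).second)  // skip duplicate offsets
          Offsets.push_back(L->Offset);
      std::sort(Offsets.begin(), Offsets.end());
      std::vector<Load *> Sorted;
      for (int64_t O : Offsets)
        Sorted.push_back(O2SMap[O]);
      return Sorted;
    }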
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonSubtarget.cpp |
   290  int64_t Offset1;  in apply() local
   292  MachineOperand *BaseOp1 = HII.getBaseAndOffset(L1, Offset1, Size1);  in apply()
   298  if (((Offset0 ^ Offset1) & 0x18) != 0)  in apply()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86InstrInfo.h |
   381  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
   392  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1,
|
D | X86InstrInfo.cpp |
  5747  int64_t &Offset1, int64_t &Offset2) const {  in areLoadsFromSameBasePtr() argument
  5939  Offset1 = Disp1->getSExtValue();  in areLoadsFromSameBasePtr()
  5945  int64_t Offset1, int64_t Offset2,  in shouldScheduleLoadsNear() argument
  5947  assert(Offset2 > Offset1);  in shouldScheduleLoadsNear()
  5948  if ((Offset2 - Offset1) / 8 > 64)  in shouldScheduleLoadsNear()
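Both the X86 hits above and the ARM hits further down apply the same distance heuristic: refuse to cluster two loads whose offsets are more than 64 * 8 = 512 bytes apart. A minimal sketch of that check; the function name is illustrative:

    #include <cassert>
    #include <cstdint>

    // Mirrors the (Offset2 - Offset1) / 8 > 64 cutoff seen above.
    bool withinClusterWindow(int64_t Offset1, int64_t Offset2) {
      assert(Offset2 > Offset1 && "caller orders the offsets");
      return (Offset2 - Offset1) / 8 <= 64;  // within 512 bytes
    }
    // withinClusterWindow(0, 512) -> true; withinClusterWindow(0, 520) -> false.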
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Mips/ |
D | MicroMipsSizeReduction.cpp |
   400  int64_t Offset1, Offset2;  in ConsecutiveInstr() local
   401  if (!GetImm(MI1, 2, Offset1))  in ConsecutiveInstr()
   409  return ((Offset1 == (Offset2 - 4)) && (ConsecutiveRegisters(Reg1, Reg2)));  in ConsecutiveInstr()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/AArch64/ |
D | AArch64GenCallingConv.inc |
   689  unsigned Offset1 = State.AllocateStack(4, 4);
   690  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
   778  unsigned Offset1 = State.AllocateStack(8, 16);
   779  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
   930  unsigned Offset1 = State.AllocateStack(4, 4);
   931  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMBaseInstrInfo.h |
   245  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
   257  int64_t Offset1, int64_t Offset2,
|
D | ARMBaseInstrInfo.cpp |
  1836  int64_t &Offset1,  in areLoadsFromSameBasePtr() argument
  1897  Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();  in areLoadsFromSameBasePtr()
  1917  int64_t Offset1, int64_t Offset2,  in shouldScheduleLoadsNear() argument
  1922  assert(Offset2 > Offset1);  in shouldScheduleLoadsNear()
  1924  if ((Offset2 - Offset1) / 8 > 64)  in shouldScheduleLoadsNear()
|
/third_party/node/deps/v8/src/utils/ |
D | utils.h |
   233  #define STATIC_ASSERT_FIELD_OFFSETS_EQUAL(Offset1, Offset2) \  argument
   234  STATIC_ASSERT(static_cast<int>(Offset1) == Offset2)
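The V8 macro above simply static-asserts that two compile-time field offsets agree. A hypothetical standalone equivalent, with V8's STATIC_ASSERT wrapper expanded to static_assert and made-up structs for the usage:

    #include <cstddef>

    #define STATIC_ASSERT_FIELD_OFFSETS_EQUAL(Offset1, Offset2) \
      static_assert(static_cast<int>(Offset1) == (Offset2), "field offsets differ")

    struct A { int x; int y; };
    struct B { int u; int v; };
    // Both 'y' and 'v' sit after one int, so the offsets match.
    STATIC_ASSERT_FIELD_OFFSETS_EQUAL(offsetof(A, y), offsetof(B, v));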
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | TargetInstrInfo.h |
  1219  int64_t &Offset1,  in areLoadsFromSameBasePtr() argument
  1233  int64_t Offset1, int64_t Offset2,  in shouldScheduleLoadsNear() argument
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/ |
D | SeparateConstOffsetFromGEP.cpp |
  1294  Value *Offset1 = First->getOperand(1);  in swapGEPOperand() local
  1297  Second->setOperand(1, Offset1);  in swapGEPOperand()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
   148  int64_t &Offset1) const {  in areLoadsFromSameBasePtr()
   184  Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();  in areLoadsFromSameBasePtr()
   209  Offset1 = Load1Offset->getZExtValue();  in areLoadsFromSameBasePtr()
   242  Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();  in areLoadsFromSameBasePtr()
   295  uint8_t Offset1 = Offset1Imm->getImm();  in getMemOperandWithOffset() local
   297  if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {  in getMemOperandWithOffset()
   505  int64_t Offset0, int64_t Offset1,  in shouldScheduleLoadsNear() argument
   507  assert(Offset1 > Offset0 &&  in shouldScheduleLoadsNear()
   513  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);  in shouldScheduleLoadsNear()
  2531  int64_t Offset0, Offset1;  in checkInstOffsetsDoNotOverlap() local
  [all …]
|
D | AMDGPUISelDAGToDAG.cpp |
   207  SDValue &Offset1) const;
  1260  SDValue &Offset1) const {  in SelectDS64Bit4ByteAligned()
  1273  Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
  1308  Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
  1325  Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
  1334  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);  in SelectDS64Bit4ByteAligned()
|
D | SIInstrInfo.h |
   181  int64_t &Offset1,
   194  int64_t Offset1, unsigned NumLoads) const override;
|
D | AMDGPUInstructionSelector.cpp |
  1081  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |  in selectDSOrderedIntrinsic() local
  1085  Offset1 |= (CountDw - 1) << 6;  in selectDSOrderedIntrinsic()
  1087  unsigned Offset = Offset0 | (Offset1 << 8);  in selectDSOrderedIntrinsic()
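The selectDSOrderedIntrinsic hits above pack several control fields into the 16-bit offset of an AMDGPU ds_ordered_count instruction: the low byte holds Offset0 and the high byte is assembled bit by bit. A sketch of that packing, with the bit layout taken directly from the snippets; parameter names are illustrative:

    #include <cstdint>

    unsigned packDSOrderedOffset(unsigned Offset0, bool WaveRelease,
                                 bool WaveDone, unsigned ShaderType,
                                 unsigned CountDw) {
      unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2);
      Offset1 |= (CountDw - 1) << 6;   // dword count, stored biased by one
      return Offset0 | (Offset1 << 8); // Offset0 in the low byte
    }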
|
D | SILoadStoreOptimizer.cpp |
  1724  uint64_t Offset1 = Src1->getImm();  in processBaseWithConstOffset() local
  1731  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);  in processBaseWithConstOffset()
|
D | SIInstrInfo.td | 1048 def offset1 : NamedOperandU8<"Offset1", NamedMatchClass<"Offset1">>;
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64InstrInfo.cpp |
  2338  int64_t Offset1, unsigned Opcode1, int FI2,  in shouldClusterFI() argument
  2355  ObjectOffset1 += Offset1;  in shouldClusterFI()
  2401  int64_t Offset1 = FirstLdSt.getOperand(2).getImm();  in shouldClusterMemOps() local
  2402  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))  in shouldClusterMemOps()
  2410  if (Offset1 > 63 || Offset1 < -64)  in shouldClusterMemOps()
  2416  assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&  in shouldClusterMemOps()
  2421  return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc,  in shouldClusterMemOps()
  2425  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");  in shouldClusterMemOps()
  2427  return Offset1 + 1 == Offset2;  in shouldClusterMemOps()
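AArch64's shouldClusterMemOps above works in scaled (access-width) units: after scaling any unscaled offset, the first offset must fit the signed 7-bit range used by paired accesses, and the two accesses must be exactly adjacent. A reduced sketch of those two checks, not the full LLVM routine, which also handles frame indices:

    #include <cassert>
    #include <cstdint>

    bool shouldClusterScaledOffsets(int64_t Offset1, int64_t Offset2) {
      if (Offset1 > 63 || Offset1 < -64)  // signed 7-bit paired-access range
        return false;
      assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
      return Offset1 + 1 == Offset2;      // adjacent in scaled units
    }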
|
D | AArch64InstrFormats.td |
  9544  int Offset1, int Offset2, int Offset4, int Offset8> {
  9564  !cast<DAGOperand>("GPR64pi" # Offset1)>;
  9567  !cast<DAGOperand>("GPR64pi" # Offset1)>;
  9587  defm : SIMDLdrAliases<NAME, asm, "8b", Count, Offset1, 64>;
  9588  defm : SIMDLdrAliases<NAME, asm, "16b", Count, Offset1, 128>;
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/Mips/ |
D | MipsGenCallingConv.inc |
   354  unsigned Offset1 = State.AllocateStack(4, 4);
   355  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | MachinePipeliner.cpp |
   695  int64_t Offset1, Offset2;  in addLoopCarriedDependences() local
   696  if (TII->getMemOperandWithOffset(LdMI, BaseOp1, Offset1, TRI) &&  in addLoopCarriedDependences()
   699  (int)Offset1 < (int)Offset2) {  in addLoopCarriedDependences()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/X86/ |
D | X86GenCallingConv.inc |
  1108  unsigned Offset1 = State.AllocateStack(4, 4);
  1109  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
  1130  unsigned Offset1 = State.AllocateStack(16, 16);
  1131  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
  1841  unsigned Offset1 = State.AllocateStack(4, 4);
  1842  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/ |
D | ValueTracking.cpp |
  5992  auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);  in isPointerOffset() local
  5994  if (!Offset1 || !Offset2)  in isPointerOffset()
  5996  return *Offset2 - *Offset1;  in isPointerOffset()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCISelLowering.cpp |
 12057  int64_t Offset1 = 0, Offset2 = 0;  in isConsecutiveLSLoc() local
 12058  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);  in isConsecutiveLSLoc()
 12060  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))  in isConsecutiveLSLoc()
 12066  Offset1 = 0;  in isConsecutiveLSLoc()
 12068  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);  in isConsecutiveLSLoc()
 12071  return Offset1 == (Offset2 + Dist*Bytes);  in isConsecutiveLSLoc()
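The PPC hits above test whether two memory locations are Dist elements apart: same base, and the first offset equals the second plus Dist * Bytes. A minimal sketch of that comparison; the types are simplified, since LLVM compares SDValue bases rather than raw pointers:

    #include <cstdint>

    bool isConsecutive(const void *Base1, int64_t Offset1,
                       const void *Base2, int64_t Offset2,
                       int64_t Dist, int64_t Bytes) {
      return Base1 == Base2 && Offset1 == Offset2 + Dist * Bytes;
    }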
|