/third_party/json/test/thirdparty/Fuzzer/test/ |
D | CustomCrossOverTest.cpp |
    51  size_t Offset2 = 0;  in LLVMFuzzerCustomCrossOver() local
    52  size_t Len2 = R() % (Size2 - Offset2);  in LLVMFuzzerCustomCrossOver()
    60  memcpy(Out + Len1 + SeparatorLen, Data2 + Offset2, Len2);  in LLVMFuzzerCustomCrossOver()
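These hits are libFuzzer's test of the custom crossover hook: splice a prefix of one input, a separator, and a slice of the other input into Out. A minimal compilable sketch of such a hook, assuming the standard LLVMFuzzerCustomCrossOver() signature; R and kSeparator are illustrative stand-ins for the test's RNG and separator:

```cpp
#include <cstdint>
#include <cstring>
#include <random>

extern "C" size_t LLVMFuzzerCustomCrossOver(const uint8_t *Data1, size_t Size1,
                                            const uint8_t *Data2, size_t Size2,
                                            uint8_t *Out, size_t MaxOutSize,
                                            unsigned Seed) {
  if (Size1 == 0 || Size2 == 0)
    return 0;
  std::mt19937 R(Seed);                   // illustrative RNG
  static const char kSeparator[] = "|";   // illustrative separator
  const size_t SeparatorLen = sizeof(kSeparator) - 1;
  size_t Len1 = R() % Size1;              // prefix of the first input
  size_t Offset2 = R() % Size2;           // start of the slice in input 2
  size_t Len2 = R() % (Size2 - Offset2);  // slice stays within input 2
  if (Len1 + SeparatorLen + Len2 > MaxOutSize)
    return 0;                             // refuse rather than truncate
  std::memcpy(Out, Data1, Len1);
  std::memcpy(Out + Len1, kSeparator, SeparatorLen);
  std::memcpy(Out + Len1 + SeparatorLen, Data2 + Offset2, Len2);
  return Len1 + SeparatorLen + Len2;
}
```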
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/ |
D | ScheduleDAGSDNodes.cpp |
    240  int64_t Offset1, Offset2;  in ClusterNeighboringLoads() local
    241  if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||  in ClusterNeighboringLoads()
    242  Offset1 == Offset2 ||  in ClusterNeighboringLoads()
    250  O2SMap.insert(std::make_pair(Offset2, User));  in ClusterNeighboringLoads()
    251  Offsets.push_back(Offset2);  in ClusterNeighboringLoads()
    252  if (Offset2 < Offset1)  in ClusterNeighboringLoads()
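ClusterNeighboringLoads() keys each load by its constant offset and sorts the offsets so loads on neighboring addresses can be scheduled together; equal offsets are skipped since they give nothing to cluster. A compilable sketch of that bookkeeping, with Node standing in for the SelectionDAG node type:

```cpp
#include <algorithm>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

struct Node { int Id; };  // stand-in for an SDNode

std::vector<std::pair<int64_t, Node *>>
sortLoadsByOffset(const std::vector<std::pair<int64_t, Node *>> &Loads) {
  std::map<int64_t, Node *> O2SMap;  // offset -> load at that offset
  std::vector<int64_t> Offsets;
  for (const auto &L : Loads) {
    if (!O2SMap.insert(L).second)
      continue;                      // duplicate offset: nothing to cluster
    Offsets.push_back(L.first);
  }
  std::sort(Offsets.begin(), Offsets.end());
  std::vector<std::pair<int64_t, Node *>> Sorted;
  for (int64_t O : Offsets)
    Sorted.push_back({O, O2SMap[O]});  // neighbors are now adjacent
  return Sorted;
}
```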
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86InstrInfo.h |
    382  int64_t &Offset2) const override;
    393  int64_t Offset2,
|
D | X86InstrInfo.cpp |
    5747  int64_t &Offset1, int64_t &Offset2) const {  in areLoadsFromSameBasePtr()
    5940  Offset2 = Disp2->getSExtValue();  in areLoadsFromSameBasePtr()
    5945  int64_t Offset1, int64_t Offset2,  in shouldScheduleLoadsNear() argument
    5947  assert(Offset2 > Offset1);  in shouldScheduleLoadsNear()
    5948  if ((Offset2 - Offset1) / 8 > 64)  in shouldScheduleLoadsNear()
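shouldScheduleLoadsNear() rejects clustering when the two loads are too far apart: more than 64 eight-byte chunks (512 bytes) between the offsets. The ARMBaseInstrInfo.cpp entry below applies the same cut-off. A sketch of just that distance test, without the real hook's opcode and operand checks:

```cpp
#include <cassert>
#include <cstdint>

bool shouldScheduleLoadsNearSketch(int64_t Offset1, int64_t Offset2) {
  assert(Offset2 > Offset1 && "caller orders the offsets");
  // More than 64 eight-byte chunks apart: clustering is unlikely to help.
  return (Offset2 - Offset1) / 8 <= 64;
}
```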
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Mips/ |
D | MicroMipsSizeReduction.cpp |
    400  int64_t Offset1, Offset2;  in ConsecutiveInstr() local
    403  if (!GetImm(MI2, 2, Offset2))  in ConsecutiveInstr()
    409  return ((Offset1 == (Offset2 - 4)) && (ConsecutiveRegisters(Reg1, Reg2)));  in ConsecutiveInstr()
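ConsecutiveInstr() admits a pair of 32-bit memory ops for size reduction (into one paired microMIPS instruction) only when the second offset is exactly the first plus 4 and the two registers are consecutive. A sketch of the test; the stand-in ConsecutiveRegisters() here just compares register numbers, whereas the real one checks microMIPS encoding order:

```cpp
#include <cstdint>

static bool ConsecutiveRegistersSketch(unsigned Reg1, unsigned Reg2) {
  return Reg2 == Reg1 + 1;  // simplified: real check uses encodings
}

bool ConsecutiveInstrSketch(int64_t Offset1, unsigned Reg1,
                            int64_t Offset2, unsigned Reg2) {
  // Two adjacent 4-byte slots handled by two consecutive registers.
  return Offset1 == Offset2 - 4 && ConsecutiveRegistersSketch(Reg1, Reg2);
}
```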
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMBaseInstrInfo.h |
    246  int64_t &Offset2) const override;
    257  int64_t Offset1, int64_t Offset2,
|
D | ARMBaseInstrInfo.cpp |
    1837  int64_t &Offset2) const {  in areLoadsFromSameBasePtr()
    1898  Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();  in areLoadsFromSameBasePtr()
    1917  int64_t Offset1, int64_t Offset2,  in shouldScheduleLoadsNear() argument
    1922  assert(Offset2 > Offset1);  in shouldScheduleLoadsNear()
    1924  if ((Offset2 - Offset1) / 8 > 64)  in shouldScheduleLoadsNear()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/AArch64/ |
D | AArch64GenCallingConv.inc |
    696  unsigned Offset2 = State.AllocateStack(8, 16);
    697  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    793  unsigned Offset2 = State.AllocateStack(8, 8);
    794  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    937  unsigned Offset2 = State.AllocateStack(8, 8);
    938  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
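State.AllocateStack(Size, Align) is a bump allocator over the outgoing-argument area: align the running offset up, return it for this argument, and advance by the size. The same two-line pattern recurs in the MipsGenCallingConv.inc, ARMGenCallingConv.inc, and X86GenCallingConv.inc entries below. A minimal sketch, with CCStateSketch standing in for llvm::CCState:

```cpp
#include <cstdint>

struct CCStateSketch {
  uint64_t StackOffset = 0;  // next free byte in the argument area

  // Round the running offset up to Align (assumed a power of two), hand
  // that offset to the argument, and reserve Size bytes after it.
  uint64_t AllocateStack(uint64_t Size, uint64_t Align) {
    StackOffset = (StackOffset + Align - 1) & ~(Align - 1);
    uint64_t Offset = StackOffset;
    StackOffset += Size;
    return Offset;
  }
};
```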
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/ |
D | SeparateConstOffsetFromGEP.cpp |
    1295  Value *Offset2 = Second->getOperand(1);  in swapGEPOperand() local
    1296  First->setOperand(1, Offset2);  in swapGEPOperand()
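swapGEPOperand() exchanges the index operands of two chained single-index GEPs; since (base + i) + j equals (base + j) + i, the final address is unchanged while the constant index moves to the GEP where later folding wants it. A sketch with an illustrative GEPSketch type; the real pass also revisits the inbounds flags, which is omitted here:

```cpp
#include <cstdint>
#include <utility>

struct GEPSketch { int64_t Index; };  // stand-in for getOperand(1)

void swapGEPOperandSketch(GEPSketch &First, GEPSketch &Second) {
  // Address is base + First.Index + Second.Index either way.
  std::swap(First.Index, Second.Index);
}
```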
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | TargetInstrInfo.h |
    1220  int64_t &Offset2) const {  in areLoadsFromSameBasePtr() argument
    1233  int64_t Offset1, int64_t Offset2,  in shouldScheduleLoadsNear() argument
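These are the default TargetInstrInfo hook declarations that the X86 and ARM overrides above implement. A sketch of how a scheduler client is expected to combine the pair, mirroring the call sequence in ClusterNeighboringLoads(); LoadNode and TargetInstrInfoSketch are stand-ins for SDNode and the TII interface:

```cpp
#include <cstdint>
#include <utility>

struct LoadNode;  // opaque stand-in for SDNode

struct TargetInstrInfoSketch {
  virtual ~TargetInstrInfoSketch() = default;
  virtual bool areLoadsFromSameBasePtr(LoadNode *Load1, LoadNode *Load2,
                                       int64_t &Offset1,
                                       int64_t &Offset2) const = 0;
  virtual bool shouldScheduleLoadsNear(LoadNode *Load1, LoadNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const = 0;
};

// Recover the offsets, order them, then let the target heuristic decide.
bool worthClustering(const TargetInstrInfoSketch &TII, LoadNode *A,
                     LoadNode *B, unsigned NumLoads) {
  int64_t Offset1, Offset2;
  if (!TII.areLoadsFromSameBasePtr(A, B, Offset1, Offset2) ||
      Offset1 == Offset2)
    return false;                 // different bases or same slot
  if (Offset2 < Offset1)
    std::swap(Offset1, Offset2);  // targets assert ordered offsets
  return TII.shouldScheduleLoadsNear(A, B, Offset1, Offset2, NumLoads);
}
```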
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/SystemZ/ |
D | SystemZInstrInfo.cpp |
    1512  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);  in getOpcodeForOffset() local
    1513  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {  in getOpcodeForOffset()
    1523  if (isInt<20>(Offset) && isInt<20>(Offset2)) {  in getOpcodeForOffset()
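getOpcodeForOffset() must encode both halves of a 128-bit access, so it range-checks Offset and Offset + 8 together: the short forms take an unsigned 12-bit displacement, the long forms a signed 20-bit one. A standalone sketch of that classification; isUIntN/isIntN are local helpers mirroring llvm::isUInt/isInt:

```cpp
#include <cstdint>

static bool isUIntN(unsigned N, int64_t X) {
  return X >= 0 && X < (int64_t(1) << N);
}
static bool isIntN(unsigned N, int64_t X) {
  return X >= -(int64_t(1) << (N - 1)) && X < (int64_t(1) << (N - 1));
}

enum class DispForm { Short12, Long20, None };

DispForm classifyDisplacement(int64_t Offset, bool Is128Bit) {
  int64_t Offset2 = Is128Bit ? Offset + 8 : Offset;  // high half, if any
  if (isUIntN(12, Offset) && isUIntN(12, Offset2))
    return DispForm::Short12;
  if (isIntN(20, Offset) && isIntN(20, Offset2))
    return DispForm::Long20;
  return DispForm::None;  // offset must be materialized separately
}
```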
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/Mips/ |
D | MipsGenCallingConv.inc |
    360  unsigned Offset2 = State.AllocateStack(8, 8);
    361  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/ARM/ |
D | ARMGenCallingConv.inc |
    394  unsigned Offset2 = State.AllocateStack(4, 4);
    395  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64InstrInfo.cpp |
    2339  int64_t Offset2, unsigned Opcode2) {  in shouldClusterFI() argument
    2356  ObjectOffset2 += Offset2;  in shouldClusterFI()
    2405  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();  in shouldClusterMemOps() local
    2406  if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))  in shouldClusterMemOps()
    2416  assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&  in shouldClusterMemOps()
    2422  BaseOp2.getIndex(), Offset2, SecondOpc);  in shouldClusterMemOps()
    2425  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");  in shouldClusterMemOps()
    2427  return Offset1 + 1 == Offset2;  in shouldClusterMemOps()
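shouldClusterMemOps() compares offsets in units of the access size: unscaled opcodes have their immediates divided down first, and only exactly adjacent accesses pair up. A compilable sketch of that final test, with scaleOffsetSketch standing in for the opcode-aware scaleOffset():

```cpp
#include <cstdint>
#include <utility>

static bool scaleOffsetSketch(int64_t &Offset, int64_t AccessBytes) {
  if (Offset % AccessBytes != 0)
    return false;          // not representable once scaled
  Offset /= AccessBytes;   // now in units of the access size
  return true;
}

bool shouldClusterSketch(int64_t Offset1, int64_t Offset2,
                         int64_t AccessBytes) {
  if (!scaleOffsetSketch(Offset1, AccessBytes) ||
      !scaleOffsetSketch(Offset2, AccessBytes))
    return false;
  if (Offset1 > Offset2)
    std::swap(Offset1, Offset2);  // the real hook asserts this ordering
  return Offset1 + 1 == Offset2;  // pair only exactly adjacent accesses
}
```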
|
D | AArch64InstrFormats.td |
    9544  int Offset1, int Offset2, int Offset4, int Offset8> {
    9570  !cast<DAGOperand>("GPR64pi" # Offset2)>;
    9573  !cast<DAGOperand>("GPR64pi" # Offset2)>;
    9589  defm : SIMDLdrAliases<NAME, asm, "4h", Count, Offset2, 64>;
    9590  defm : SIMDLdrAliases<NAME, asm, "8h", Count, Offset2, 128>;
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/X86/ |
D | X86GenCallingConv.inc |
    746  unsigned Offset2 = State.AllocateStack(4, 4);
    747  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    1141  unsigned Offset2 = State.AllocateStack(32, 32);
    1142  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    1801  unsigned Offset2 = State.AllocateStack(8, 8);
    1802  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    1848  unsigned Offset2 = State.AllocateStack(8, 8);
    1849  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | MachinePipeliner.cpp |
    695  int64_t Offset1, Offset2;  in addLoopCarriedDependences() local
    697  TII->getMemOperandWithOffset(MI, BaseOp2, Offset2, TRI)) {  in addLoopCarriedDependences()
    699  (int)Offset1 < (int)Offset2) {  in addLoopCarriedDependences()
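These hits compare the constant offsets of two memory operands recovered via getMemOperandWithOffset(); the underlying idea is an overlap test on (base, offset) pairs. A minimal, generic sketch of such a test, assuming fixed-size accesses (the pipeliner's actual loop-carried dependence logic has more cases):

```cpp
#include <cstdint>

struct MemRef {
  int BaseReg;     // stand-in for the base MachineOperand
  int64_t Offset;  // constant byte offset from the base
};

// True when two fixed-size accesses provably do not overlap: same base
// register and disjoint [Offset, Offset + Size) ranges.
bool provablyDisjoint(const MemRef &A, const MemRef &B, int64_t Size) {
  if (A.BaseReg != B.BaseReg)
    return false;  // different bases: cannot prove anything here
  return A.Offset + Size <= B.Offset || B.Offset + Size <= A.Offset;
}
```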
|
D | CodeGenPrepare.cpp |
    1909  uint64_t Offset2 = Offset.getLimitedValue();  in optimizeCallInst() local
    1910  if ((Offset2 & (PrefAlign-1)) != 0)  in optimizeCallInst()
    1914  DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)  in optimizeCallInst()
    1924  MinSize + Offset2)  in optimizeCallInst()
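optimizeCallInst() only raises an alloca's alignment for a memory intrinsic when the pointer's constant offset into the alloca preserves the preferred alignment and the alloca covers the whole access. A sketch of those two checks, assuming PrefAlign is a power of two (which the bit-mask test requires); the sizes are plain integers standing in for the DataLayout queries:

```cpp
#include <cstdint>

bool canRaiseAlignmentSketch(uint64_t Offset2, uint64_t PrefAlign,
                             uint64_t AllocaSize, uint64_t MinSize) {
  if ((Offset2 & (PrefAlign - 1)) != 0)
    return false;                          // offset breaks the alignment
  return AllocaSize >= MinSize + Offset2;  // access stays inside the alloca
}
```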
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.h |
    182  int64_t &Offset2) const override;
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/ |
D | ValueTracking.cpp |
    5993  auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);  in isPointerOffset() local
    5994  if (!Offset1 || !Offset2)  in isPointerOffset()
    5996  return *Offset2 - *Offset1;  in isPointerOffset()
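getOffsetFromIndex() only produces an offset when every GEP index is constant, so the results are optional; the distance between the two pointers is then simply the difference of the two offsets. A sketch of the tail of isPointerOffset(), with std::optional standing in for LLVM's Optional:

```cpp
#include <cstdint>
#include <optional>

std::optional<int64_t>
pointerDistanceSketch(std::optional<int64_t> Offset1,
                      std::optional<int64_t> Offset2) {
  if (!Offset1 || !Offset2)
    return std::nullopt;  // at least one offset was not a known constant
  return *Offset2 - *Offset1;
}
```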
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCISelLowering.cpp |
    12057  int64_t Offset1 = 0, Offset2 = 0;  in isConsecutiveLSLoc() local
    12059  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);  in isConsecutiveLSLoc()
    12060  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))  in isConsecutiveLSLoc()
    12067  Offset2 = 0;  in isConsecutiveLSLoc()
    12069  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);  in isConsecutiveLSLoc()
    12071  return Offset1 == (Offset2 + Dist*Bytes);  in isConsecutiveLSLoc()
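isConsecutiveLSLoc() decomposes both locations into base plus constant offset (first trying a register base, then a global address) and declares them consecutive when the offsets differ by exactly Dist * Bytes. A minimal sketch over stand-in (base, offset) pairs:

```cpp
#include <cstdint>

struct LocSketch {
  int Base;        // stand-in for the decomposed base (register or global)
  int64_t Offset;  // constant displacement from that base
};

// Loc sits Dist elements of Bytes each past BaseLoc when the bases match
// and the displacements differ by exactly Dist * Bytes.
bool isConsecutiveLSLocSketch(const LocSketch &Loc, const LocSketch &BaseLoc,
                              int64_t Dist, int64_t Bytes) {
  return Loc.Base == BaseLoc.Base &&
         Loc.Offset == BaseLoc.Offset + Dist * Bytes;
}
```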
|