/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | A15SDOptimizer.cpp |
      68  const DebugLoc &DL, unsigned Reg, unsigned Lane,
      74  unsigned Lane, const TargetRegisterClass *TRC);
      88  unsigned Lane, unsigned ToInsert);
     419  unsigned Lane, bool QPR) {  in createDupLane() argument
     425  .addImm(Lane)  in createDupLane()
     434  const DebugLoc &DL, unsigned DReg, unsigned Lane,  in createExtractSubreg() argument
     441  .addReg(DReg, 0, Lane);  in createExtractSubreg()
     479  const DebugLoc &DL, unsigned DReg, unsigned Lane, unsigned ToInsert) {  in createInsertSubreg() argument
     487  .addImm(Lane);  in createInsertSubreg()
     544  unsigned Lane;  in optimizeAllLanesPattern() local
     [all …]
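
For orientation, a minimal scalar sketch of the lane operations these matches build as ARM machine instructions (VDUPLN-style duplication, lane insertion). The Vec4 type and helper names are hypothetical, not SwiftShader/LLVM API:

    #include <array>
    #include <cassert>
    #include <cstdio>

    // Hypothetical 4-lane value; the real pass operates on ARM D/Q registers.
    using Vec4 = std::array<int, 4>;

    // Roughly what a VDUPLN emitted by createDupLane() computes: one source
    // lane broadcast into every lane of the result.
    static Vec4 dupLane(const Vec4 &V, unsigned Lane) {
      assert(Lane < V.size() && "lane out of range");
      return {V[Lane], V[Lane], V[Lane], V[Lane]};
    }

    // Loosely what createInsertSubreg() models: overwrite one lane, keep the
    // rest intact.
    static Vec4 insertLane(Vec4 V, unsigned Lane, int ToInsert) {
      assert(Lane < V.size() && "lane out of range");
      V[Lane] = ToInsert;
      return V;
    }

    int main() {
      Vec4 V = {10, 20, 30, 40};
      Vec4 D = dupLane(V, 2);        // {30, 30, 30, 30}
      Vec4 I = insertLane(V, 1, 99); // {10, 99, 30, 40}
      std::printf("%d %d\n", D[0], I[1]); // 30 99
    }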
|
D | ARMBaseInstrInfo.cpp |
    4867  unsigned SReg, unsigned &Lane) {  in getCorrespondingDRegAndLane() argument
    4869  Lane = 0;  in getCorrespondingDRegAndLane()
    4874  Lane = 1;  in getCorrespondingDRegAndLane()
    4898  unsigned Lane, unsigned &ImplicitSReg) {  in getImplicitSPRUseForDPRUse() argument
    4908  (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);  in getImplicitSPRUseForDPRUse()
    4926  unsigned Lane;  in setExecutionDomain() local
    4969  DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);  in setExecutionDomain()
    4977  .addImm(Lane)  in setExecutionDomain()
    4993  DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);  in setExecutionDomain()
    4996  if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))  in setExecutionDomain()
    [all …]
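
The getCorrespondingDRegAndLane() matches reflect how ARM S registers pair up inside D registers: two consecutive S registers share one D register, so S<k> is lane (k & 1) of D<k / 2>. A simplified model, with plain integers standing in for MCRegister ids:

    #include <cstdio>

    // Sketch of the S -> (D, lane) mapping in the matches above.
    static unsigned getCorrespondingDRegAndLane(unsigned SReg, unsigned &Lane) {
      Lane = SReg & 1;  // even S registers are lane 0, odd ones lane 1
      return SReg / 2;  // two S registers per D register
    }

    int main() {
      unsigned Lane;
      unsigned DReg = getCorrespondingDRegAndLane(5, Lane); // S5 -> D2, lane 1
      std::printf("S5 -> D%u lane %u\n", DReg, Lane);
      // The (Lane & 1) ? ssub_0 : ssub_1 match appears to invert the parity
      // test on purpose: it selects the *sibling* S register of the pair.
    }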
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Vectorize/ |
D | SLPVectorizer.cpp |
     766  OperandData &getData(unsigned OpIdx, unsigned Lane) {  in getData() argument
     767  return OpsVec[OpIdx][Lane];  in getData()
     771  const OperandData &getData(unsigned OpIdx, unsigned Lane) const {  in getData()
     772  return OpsVec[OpIdx][Lane];  in getData()
     779  for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;  in clearUsed() local
     780  ++Lane)  in clearUsed()
     781  OpsVec[OpIdx][Lane].IsUsed = false;  in clearUsed()
     785  void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {  in swap() argument
     786  std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);  in swap()
    1013  getBestOperand(unsigned OpIdx, int Lane, int LastLane,  in getBestOperand() argument
    [all …]
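
The OpsVec snippets all index a per-operand, per-lane table. A stripped-down model of that data structure, with OperandData reduced to a name plus the IsUsed flag seen in clearUsed() (everything else here is hypothetical):

    #include <string>
    #include <utility>
    #include <vector>

    struct OperandData {
      std::string V;       // stand-in for the real Value*
      bool IsUsed = false; // flag reset by clearUsed()
    };

    struct OperandTable {
      std::vector<std::vector<OperandData>> OpsVec; // [OpIdx][Lane]

      OperandData &getData(unsigned OpIdx, unsigned Lane) {
        return OpsVec[OpIdx][Lane];
      }
      // Swap the operands of two operand positions within one lane.
      void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
        std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
      }
      void clearUsed() {
        for (auto &Row : OpsVec)
          for (auto &Data : Row)
            Data.IsUsed = false;
      }
    };

    int main() {
      OperandTable T;
      T.OpsVec = {{{"a0"}, {"a1"}}, {{"b0"}, {"b1"}}}; // 2 operands x 2 lanes
      T.swap(0, 1, /*Lane=*/1); // lane 1 now holds b1 at operand position 0
      T.clearUsed();
      return T.getData(0, 1).V == "b1" ? 0 : 1;
    }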
|
D | VPlanSLP.cpp |
     316  for (unsigned Lane = 1, E = MultiNodeOps[0].second.size(); Lane < E; ++Lane) {  in reorderMultiNodeOps() local
     317  LLVM_DEBUG(dbgs() << " Finding best value for lane " << Lane << "\n");  in reorderMultiNodeOps()
     322  dbgs() << *cast<VPInstruction>(Ops.second[Lane])->getUnderlyingInstr()  in reorderMultiNodeOps()
     324  Candidates.insert(Ops.second[Lane]);  in reorderMultiNodeOps()
     333  VPValue *Last = FinalOrder[Op].second[Lane - 1];  in reorderMultiNodeOps()
|
D | VPlan.cpp |
     164  !(State->Instance->Part == 0 && State->Instance->Lane == 0);  in execute()
     264  for (unsigned Lane = 0, VF = State->VF; Lane < VF; ++Lane) {  in execute() local
     265  State->Instance->Lane = Lane;  in execute()
|
D | VPlan.h |
      89  unsigned Lane;  member
     154  assert(Instance.Lane < VF && "Queried Scalar Lane is too large.");  in hasScalarValue()
     161  return Entry[Instance.Part][Instance.Lane] != nullptr;  in hasScalarValue()
     175  return ScalarMapStorage[Key][Instance.Part][Instance.Lane];  in getScalarValue()
     201  ScalarMapStorage[Key][Instance.Part][Instance.Lane] = Scalar;  in setScalarValue()
     221  ScalarMapStorage[Key][Instance.Part][Instance.Lane] = Scalar;  in resetScalarValue()
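
These VPlan.h matches all address a scalar map by an (unroll part, vector lane) instance. A minimal sketch of such a map, assuming std::map storage in place of whatever container the real ScalarMapStorage uses:

    #include <cassert>
    #include <map>
    #include <vector>

    struct VPIteration {
      unsigned Part; // which unrolled copy of the loop body
      unsigned Lane; // which scalar lane within the vector
    };

    template <typename K, typename V> struct ScalarMap {
      unsigned UF, VF;
      std::map<K, std::vector<std::vector<V>>> Storage;

      void set(const K &Key, const VPIteration &I, V Val) {
        auto &Entry = Storage[Key];
        if (Entry.empty())
          Entry.assign(UF, std::vector<V>(VF, V()));
        assert(I.Part < UF && I.Lane < VF && "instance out of range");
        Entry[I.Part][I.Lane] = Val;
      }
      V get(const K &Key, const VPIteration &I) const {
        return Storage.at(Key)[I.Part][I.Lane];
      }
    };

    int main() {
      ScalarMap<int, double> M{2, 4, {}};
      M.set(7, {1, 2}, 3.5); // value for part 1, lane 2 of key 7
      return M.get(7, {1, 2}) == 3.5 ? 0 : 1;
    }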
|
D | LoopVectorize.cpp |
     615  unsigned Lane = UINT_MAX);
    1779  Value *VectorLoopVal, unsigned Part, unsigned Lane) {  in recordVectorLoopValueForInductionCast() argument
    1799  if (Lane < UINT_MAX)  in recordVectorLoopValueForInductionCast()
    1800  VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);  in recordVectorLoopValueForInductionCast()
    1989  for (unsigned Lane = 0; Lane < Lanes; ++Lane) {  in buildScalarSteps() local
    1990  auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);  in buildScalarSteps()
    1993  VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);  in buildScalarSteps()
    1994  recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);  in buildScalarSteps()
    2057  for (unsigned Lane = 0; Lane < VF; ++Lane)  in getOrCreateVectorValue() local
    2058  packScalarIntoVectorValue(V, {Part, Lane});  in getOrCreateVectorValue()
    [all …]
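
The buildScalarSteps() lines show the indexing rule: the scalar copy of an induction at (Part, Lane) starts at index VF * Part + Lane, scaled by the step. A worked example with made-up UF/VF/start/step values:

    #include <cstdio>

    int main() {
      const unsigned UF = 2, VF = 4;
      const int Start = 0, Step = 3; // hypothetical induction: 0, 3, 6, ...
      for (unsigned Part = 0; Part < UF; ++Part)
        for (unsigned Lane = 0; Lane < VF; ++Lane) {
          unsigned StartIdx = VF * Part + Lane;
          std::printf("part %u lane %u -> %d\n", Part, Lane,
                      Start + (int)StartIdx * Step);
        }
      // part 0 produces 0 3 6 9; part 1 continues with 12 15 18 21
    }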
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/WebAssembly/ |
D | WebAssemblyISelLowering.cpp |
    1335  auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {  in LowerBUILD_VECTOR() argument
    1337  if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)  in LowerBUILD_VECTOR()
    1339  const SDValue &SwizzleSrc = Lane->getOperand(0);  in LowerBUILD_VECTOR()
    1340  const SDValue &IndexExt = Lane->getOperand(1);  in LowerBUILD_VECTOR()
    1383  const SDValue &Lane = Op->getOperand(I);  in LowerBUILD_VECTOR() local
    1384  if (Lane.isUndef())  in LowerBUILD_VECTOR()
    1387  AddCount(SplatValueCounts, Lane);  in LowerBUILD_VECTOR()
    1389  if (IsConstant(Lane)) {  in LowerBUILD_VECTOR()
    1392  auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);  in LowerBUILD_VECTOR()
    1420  IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {  in LowerBUILD_VECTOR() argument
    [all …]
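
GetSwizzleSrcs() only treats a BUILD_VECTOR lane as swizzle material if it is an element extract, recording its (source vector, index) operands. A toy stand-in with a hypothetical Node type, ignoring the additional index checks the real lowering performs on the SelectionDAG:

    #include <optional>
    #include <utility>

    struct Node {
      enum Kind { ExtractVectorElt, Other } K;
      Node *Src = nullptr;   // operand 0: the vector being indexed
      Node *Index = nullptr; // operand 1: the lane index
    };

    // Simplified: the real lambda also takes the destination index I.
    static std::optional<std::pair<Node *, Node *>> getSwizzleSrcs(Node *Lane) {
      if (Lane->K != Node::ExtractVectorElt)
        return std::nullopt;
      return std::make_pair(Lane->Src, Lane->Index);
    }

    int main() {
      Node Vec{Node::Other}, Idx{Node::Other};
      Node Extract{Node::ExtractVectorElt, &Vec, &Idx};
      return getSwizzleSrcs(&Extract).has_value() ? 0 : 1;
    }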
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/MC/ |
D | LaneBitmask.h |
      84  static constexpr LaneBitmask getLane(unsigned Lane) {  in getLane()
      85  return LaneBitmask(Type(1) << Lane);  in getLane()
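
getLane() is visible in full above: a plain single-bit mask, lane N mapping to bit N. A worked example, with std::uint64_t standing in for LaneBitmask::Type:

    #include <cstdint>
    #include <cstdio>

    int main() {
      using Type = std::uint64_t; // stand-in for LaneBitmask::Type
      for (unsigned Lane : {0u, 3u, 7u})
        std::printf("getLane(%u) = 0x%llx\n", Lane,
                    (unsigned long long)(Type(1) << Lane));
      // getLane(0) = 0x1, getLane(3) = 0x8, getLane(7) = 0x80
    }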
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86InterleavedAccess.cpp |
     440  for (int Lane = 0; Lane < LaneCount; Lane++)  in createShuffleStride() local
     442  Mask.push_back((i * Stride) % LaneSize + LaneSize * Lane);  in createShuffleStride()
     615  int Lane = (VectorWidth / 128 > 0) ? VectorWidth / 128 : 1;  in group2Shuffle() local
     617  IndexGroup[(Index * 3) % (VF / Lane)] = Index;  in group2Shuffle()
     621  for (int i = 0; i < VF / Lane; i++) {  in group2Shuffle()
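
The match at line 442 contains the whole createShuffleStride() mask formula: within each 128-bit lane, element i picks (i * Stride) % LaneSize, offset by the lane's base. A standalone reconstruction with illustrative Stride/LaneSize/LaneCount values (the real loop nesting may differ):

    #include <cstdio>
    #include <vector>

    int main() {
      const int Stride = 3, LaneSize = 4, LaneCount = 2;
      std::vector<int> Mask;
      for (int Lane = 0; Lane < LaneCount; Lane++)
        for (int i = 0; i < LaneSize; i++)
          Mask.push_back((i * Stride) % LaneSize + LaneSize * Lane);
      for (int M : Mask)
        std::printf("%d ", M); // 0 3 2 1 4 7 6 5
      std::printf("\n");
    }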
|
/third_party/libjpeg-turbo/ |
D | jversion.h.in |
       5  * Copyright (C) 1991-2020, Thomas G. Lane, Guido Vollbeding.
      51  "Copyright (C) 1991-2020 Thomas G. Lane, Guido Vollbeding"
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SIMachineFunctionInfo.h |
     433  int Lane = -1;
     436  SpilledReg(unsigned R, int L) : VGPR (R), Lane (L) {}
     438  bool hasLane() { return Lane != -1;}
     509  MCPhysReg getVGPRToAGPRSpill(int FrameIndex, unsigned Lane) const {
     512  : I->second.Lanes[Lane];
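
SpilledReg is visible almost in full here: an SGPR spill location is a VGPR plus a lane index within it, with -1 meaning no lane assigned. A self-contained copy of that pair plus a usage line (the values are made up):

    #include <cstdio>

    struct SpilledReg {
      unsigned VGPR = 0;
      int Lane = -1; // -1 until a lane inside the VGPR is assigned
      SpilledReg() = default;
      SpilledReg(unsigned R, int L) : VGPR(R), Lane(L) {}
      bool hasLane() const { return Lane != -1; }
    };

    int main() {
      SpilledReg S(/*R=*/42, /*L=*/7);
      std::printf("vgpr %u lane %d (%s)\n", S.VGPR, S.Lane,
                  S.hasLane() ? "assigned" : "unassigned");
    }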
|
D | SIFrameLowering.cpp |
     765  .addImm(Spill[0].Lane)  in emitPrologue()
     866  .addImm(Spill[0].Lane);  in emitEpilogue()
    1029  << ':' << Spill.Lane << '\n');  in determineCalleeSaves()
    1047  << ':' << Spill.Lane << '\n';);  in determineCalleeSaves()
|
D | SIRegisterInfo.cpp |
     546  unsigned Lane,  in spillVGPRtoAGPR() argument
     554  MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);  in spillVGPRtoAGPR()
     806  .addImm(Spill.Lane)  in spillSGPR()
     897  .addImm(Spill.Lane);  in restoreSGPR()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | InterleavedAccessPass.cpp |
     221  unsigned Lane = J * Factor + I;  in isReInterleaveMask() local
     222  unsigned NextLane = Lane + Factor;  in isReInterleaveMask()
     223  int LaneValue = Mask[Lane];  in isReInterleaveMask()
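
isReInterleaveMask() addresses the shuffle mask as groups of Factor elements: entry (I, J) sits at lane J * Factor + I, and its counterpart in the next group is exactly one Factor further on. A worked printout of that indexing with illustrative Factor/NumSubElts values:

    #include <cstdio>

    int main() {
      const unsigned Factor = 4, NumSubElts = 3;
      for (unsigned I = 0; I < Factor; ++I)
        for (unsigned J = 0; J < NumSubElts; ++J) {
          unsigned Lane = J * Factor + I;
          std::printf("I=%u J=%u -> Lane %u, NextLane %u\n", I, J, Lane,
                      Lane + Factor);
        }
    }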
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/InstCombine/ |
D | InstCombineSimplifyDemanded.cpp |
    1694  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {  in SimplifyDemandedVectorElts() local
    1695  unsigned LaneIdx = Lane * VWidthPerLane;  in SimplifyDemandedVectorElts()
    1699  OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);  in SimplifyDemandedVectorElts()
    1709  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {  in SimplifyDemandedVectorElts() local
    1710  APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);  in SimplifyDemandedVectorElts()
    1712  LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);  in SimplifyDemandedVectorElts()
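
These matches regroup undef-element bits per 128-bit lane for x86 pack-style intrinsics, where each result lane takes half its elements from operand 0 and half from operand 1. A loose bitmask model of the shift arithmetic only, with std::uint64_t in place of APInt and made-up widths:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned NumLanes = 2, InnerVWidthPerLane = 4;
      const std::uint64_t OpUndefElts = 0x13; // made-up per-operand undef bits
      for (unsigned OpNum = 0; OpNum < 2; ++OpNum)
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          std::uint64_t LaneElts = OpUndefElts >> (InnerVWidthPerLane * Lane);
          LaneElts &= (std::uint64_t(1) << InnerVWidthPerLane) - 1; // one lane
          // Slot the lane's bits into the result position for this operand.
          LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
          std::printf("op %u lane %u -> 0x%llx\n", OpNum, Lane,
                      (unsigned long long)LaneElts);
        }
    }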
|
/third_party/skia/third_party/externals/libpng/ |
D | AUTHORS | 28 * Tom Lane
|
D | LICENSE | 77 Tom Lane
|
/third_party/vk-gl-cts/external/vulkancts/data/vulkan/amber/draw/shader_invocation/ |
D | helper_invocation.amber | 55 // Lane 1 and 2 should be nuked by this.
|
/third_party/skia/third_party/externals/icu/source/data/locales/ |
D | mfe.txt | 255 dn{"Lane"}
|
/third_party/skia/third_party/externals/swiftshader/docs/ |
D | VulkanShaderDebugging.md | 51 …urrently presented as a single thread, with each invocation presented as `Lane N` groups in the wa…
|
/third_party/icu/icu4c/source/data/locales/ |
D | mfe.txt | 275 dn{"Lane"}
|
/third_party/skia/third_party/externals/libjpeg-turbo/ |
D | coderules.txt | 4 Copyright (C) 1991-1996, Thomas G. Lane.
|
D | jconfig.txt | 5 * Copyright (C) 1991-1994, Thomas G. Lane.
|
/third_party/libpng/ |
D | LICENSE | 77 Tom Lane
|