/third_party/skia/experimental/graphite/src/geom/ |
D | Rect.h |
    16  #define AI SK_ALWAYS_INLINE  macro
    29  AI Rect() = default;
    30  AI Rect(float l, float t, float r, float b) : fVals(NegateBotRight({l,t,r,b})) {}  in Rect()
    31  AI Rect(float2 topLeft, float2 botRight) : fVals(topLeft, -botRight) {}  in Rect()
    32  AI Rect(const SkRect& r) : fVals(NegateBotRight(float4::Load(r.asScalars()))) {}  in Rect()
    34  AI static Rect XYWH(float x, float y, float w, float h) {  in XYWH()
    37  AI static Rect XYWH(float2 topLeft, float2 size) {  in XYWH()
    40  AI static Rect WH(float w, float h) {  in WH()
    43  AI static Rect WH(float2 size) {  in WH()
    46  AI static Rect Point(float2 p) {  in Point()
    [all …]
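
The NegateBotRight constructors above hint at graphite's rect encoding: the four lanes are stored as (left, top, -right, -bottom), which makes intersection a single lane-wise max and containment a uniform lane-wise <= test. A minimal standalone sketch of that trick, assuming this reading of the constructors (Float4/NegRect are illustrative stand-ins, not Skia's types):

    #include <algorithm>

    // Stand-in for a 4-lane SIMD float vector (Skia uses skvx::float4).
    struct Float4 { float v[4]; };

    static Float4 max4(Float4 a, Float4 b) {
        Float4 r;
        for (int i = 0; i < 4; ++i) r.v[i] = std::max(a.v[i], b.v[i]);
        return r;
    }

    struct NegRect {
        Float4 fVals;  // stored as (left, top, -right, -bottom)

        static NegRect Make(float l, float t, float r, float b) {
            return NegRect{{l, t, -r, -b}};
        }
        // Intersection is one lane-wise max: max(-r0,-r1) == -min(r0,r1), so
        // the negated lanes shrink toward the overlap just like l and t do.
        NegRect intersect(const NegRect& o) const {
            return NegRect{max4(fVals, o.fVals)};
        }
        // "this contains o" collapses to a uniform lane-wise <= test.
        bool contains(const NegRect& o) const {
            for (int i = 0; i < 4; ++i)
                if (fVals.v[i] > o.fVals.v[i]) return false;
            return true;
        }
    };
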
|
/third_party/skia/include/private/ |
D | SkNx_neon.h |
    19  AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {  in emulate_vrndmq_f32()
    24  AI static float32x2_t emulate_vrndm_f32(float32x2_t v) {  in emulate_vrndm_f32()
    33  AI SkNx(float32x2_t vec) : fVec(vec) {}  in SkNx()
    35  AI SkNx() {}  in SkNx()
    36  AI SkNx(float val) : fVec(vdup_n_f32(val)) {}  in SkNx()
    37  AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }  in SkNx()
    39  AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }  in Load()
    40  AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }  in store()
    42  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
    48  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
    [all …]
|
D | SkNx_sse.h |
    31  AI static __m128 emulate_mm_floor_ps(__m128 v) {  in emulate_mm_floor_ps()
    40  AI SkNx(const __m128& vec) : fVec(vec) {}  in SkNx()
    42  AI SkNx() {}  in SkNx()
    43  AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}  in SkNx()
    44  AI static SkNx Load(const void* ptr) {  in Load()
    47  AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}  in SkNx()
    49  AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }  in store()
    51  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
    57  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
    62  AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {  in Store3()
    [all …]
|
D | SkNx.h |
    20  #define AI SK_ALWAYS_INLINE  macro
    31  AI SkNx() = default;
    32  AI SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}  in SkNx()
    34  AI SkNx(T v) : fLo(v), fHi(v) {}  in SkNx()
    36  AI SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }  in SkNx()
    37  AI SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }  in SkNx()
    38  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {  in SkNx()
    41  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h,  in SkNx()
    47  AI T operator[](int k) const {
    52  AI static SkNx Load(const void* vptr) {  in Load()
    [all …]
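
The fLo/fHi constructors above are the whole design: the generic SkNx<N,T> is defined recursively as two half-width vectors bottoming out at a scalar, while SkNx_neon.h and SkNx_sse.h specialize the small widths with hardware registers. A minimal standalone sketch of that recursion (illustrative names, not Skia's exact code):

    #include <cstring>

    template <int N, typename T>
    struct Vec {
        Vec<N/2, T> fLo, fHi;   // recursive halves, as in SkNx

        Vec() = default;
        Vec(T v) : fLo(v), fHi(v) {}

        static Vec Load(const void* ptr) {
            Vec v;
            std::memcpy(&v, ptr, sizeof(v));   // the generic Load is a plain copy
            return v;
        }
        void store(void* ptr) const { std::memcpy(ptr, this, sizeof(*this)); }

        Vec operator+(const Vec& o) const {
            Vec r;
            r.fLo = fLo + o.fLo;   // each op recurses until the scalar base case
            r.fHi = fHi + o.fHi;
            return r;
        }
    };

    // Base case: a 1-wide "vector" is just the scalar. Platform headers would
    // instead specialize e.g. Vec<4,float> with a NEON/SSE register type.
    template <typename T>
    struct Vec<1, T> {
        T fVal;
        Vec() = default;
        Vec(T v) : fVal(v) {}
        Vec operator+(const Vec& o) const { return Vec(fVal + o.fVal); }
    };
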
|
/third_party/flutter/skia/include/private/ |
D | SkNx_neon.h |
    19  AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {  in emulate_vrndmq_f32()
    24  AI static float32x2_t emulate_vrndm_f32(float32x2_t v) {  in emulate_vrndm_f32()
    33  AI SkNx(float32x2_t vec) : fVec(vec) {}  in SkNx()
    35  AI SkNx() {}  in SkNx()
    36  AI SkNx(float val) : fVec(vdup_n_f32(val)) {}  in SkNx()
    37  AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }  in SkNx()
    39  AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }  in Load()
    40  AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }  in store()
    42  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
    48  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
    [all …]
|
D | SkNx_sse.h |
    31  AI static __m128 emulate_mm_floor_ps(__m128 v) {  in emulate_mm_floor_ps()
    40  AI SkNx(const __m128& vec) : fVec(vec) {}  in SkNx()
    42  AI SkNx() {}  in SkNx()
    43  AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}  in SkNx()
    44  AI static SkNx Load(const void* ptr) {  in Load()
    47  AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}  in SkNx()
    49  AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }  in store()
    51  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
    57  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
    62  AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {  in Store3()
    [all …]
|
D | SkNx.h |
    20  #define AI SK_ALWAYS_INLINE  macro
    31  AI SkNx() = default;
    32  AI SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}  in SkNx()
    34  AI SkNx(T v) : fLo(v), fHi(v) {}  in SkNx()
    36  AI SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }  in SkNx()
    37  AI SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }  in SkNx()
    38  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {  in SkNx()
    41  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h,  in SkNx()
    47  AI T operator[](int k) const {
    52  AI static SkNx Load(const void* vptr) {  in Load()
    [all …]
|
/third_party/flutter/skia/third_party/externals/harfbuzz/src/ |
D | hb-atomic.hh |
    59  #define hb_atomic_int_impl_add(AI, V) __atomic_fetch_add ((AI), (V), __ATOMIC_ACQ_REL)  argument
    60  #define hb_atomic_int_impl_set_relaxed(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELAXED)  argument
    61  #define hb_atomic_int_impl_set(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELEASE)  argument
    62  #define hb_atomic_int_impl_get_relaxed(AI) __atomic_load_n ((AI), __ATOMIC_RELAXED)  argument
    63  #define hb_atomic_int_impl_get(AI) __atomic_load_n ((AI), __ATOMIC_ACQUIRE)  argument
    86  #define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->fetch_add ((V), …  argument
    87  #define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V…  argument
    88  #define hb_atomic_int_impl_set(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std:…  argument
    89  #define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (…  argument
    90  #define hb_atomic_int_impl_get(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (std::me…  argument
    [all …]
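
The two macro blocks above implement one contract, selected by toolchain: GCC/Clang __atomic builtins with explicit memory orders, or C++11 std::atomic applied through a cast because HarfBuzz keeps the stored type a plain int. A hedged sketch of what the add macro amounts to under each backend (wrapper functions are illustrative, not HarfBuzz code; the builtin form requires GCC or Clang):

    #include <atomic>

    // Builtin backend: returns the pre-increment value, acq_rel ordering.
    static inline int impl_add_builtin (int *ai, int v) {
        return __atomic_fetch_add (ai, v, __ATOMIC_ACQ_REL);
    }

    // std::atomic backend: identical semantics; the cast works because the
    // macro is only applied to suitably aligned ints that are accessed
    // exclusively through these accessors.
    static inline int impl_add_std (int *ai, int v) {
        return reinterpret_cast<std::atomic<int> *> (ai)
                   ->fetch_add (v, std::memory_order_acq_rel);
    }
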
|
/third_party/harfbuzz/src/ |
D | hb-atomic.hh |
    59  #define hb_atomic_int_impl_add(AI, V) __atomic_fetch_add ((AI), (V), __ATOMIC_ACQ_REL)  argument
    60  #define hb_atomic_int_impl_set_relaxed(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELAXED)  argument
    61  #define hb_atomic_int_impl_set(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELEASE)  argument
    62  #define hb_atomic_int_impl_get_relaxed(AI) __atomic_load_n ((AI), __ATOMIC_RELAXED)  argument
    63  #define hb_atomic_int_impl_get(AI) __atomic_load_n ((AI), __ATOMIC_ACQUIRE)  argument
    87  #define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->fetch_add ((V), …  argument
    88  #define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V…  argument
    89  #define hb_atomic_int_impl_set(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std:…  argument
    90  #define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (…  argument
    91  #define hb_atomic_int_impl_get(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (std::me…  argument
    [all …]
|
/third_party/skia/third_party/externals/harfbuzz/src/ |
D | hb-atomic.hh |
    59  #define hb_atomic_int_impl_add(AI, V) __atomic_fetch_add ((AI), (V), __ATOMIC_ACQ_REL)  argument
    60  #define hb_atomic_int_impl_set_relaxed(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELAXED)  argument
    61  #define hb_atomic_int_impl_set(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELEASE)  argument
    62  #define hb_atomic_int_impl_get_relaxed(AI) __atomic_load_n ((AI), __ATOMIC_RELAXED)  argument
    63  #define hb_atomic_int_impl_get(AI) __atomic_load_n ((AI), __ATOMIC_ACQUIRE)  argument
    87  #define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->fetch_add ((V), …  argument
    88  #define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V…  argument
    89  #define hb_atomic_int_impl_set(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std:…  argument
    90  #define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (…  argument
    91  #define hb_atomic_int_impl_get(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (std::me…  argument
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64StackTagging.cpp |
    271  AllocaInst *AI;  member
    290  bool isInterestingAlloca(const AllocaInst &AI);
    293  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
    295  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);
    393  bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {  in isInterestingAlloca()  argument
    396  AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&  in isInterestingAlloca()
    398  AI.getAllocationSizeInBits(*DL).getValue() > 0 &&  in isInterestingAlloca()
    401  !AI.isUsedWithInAlloca() &&  in isInterestingAlloca()
    403  !AI.isSwiftError();  in isInterestingAlloca()
    407  void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,  in tagAlloca()  argument
    [all …]
|
/third_party/skia/src/gpu/tessellate/ |
D | WangsFormula.h |
    17  #define AI SK_MAYBE_UNUSED SK_ALWAYS_INLINE  macro
    39  AI float root4(float x) {  in root4()
    47  AI int nextlog4(float x) {  in nextlog4()
    55  AI int nextlog16(float x) {  in nextlog16()
    67  AI explicit VectorXform() : fType(Type::kIdentity) {}  in VectorXform()
    68  AI explicit VectorXform(const SkMatrix& m) { *this = m; }  in VectorXform()
    69  AI VectorXform& operator=(const SkMatrix& m) {
    87  AI float2 operator()(float2 vector) const {  in operator()
    98  AI float4 operator()(float4 vectors) const {  in operator()
    118  AI float quadratic_pow4(float precision,
    [all …]
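
A hedged sketch of the three helpers named above, on the assumption (from their names and quadratic_pow4) that root4 is the fourth root — Wang's formula is computed to the fourth power to defer the root — and nextlog4/nextlog16 are ceil(log4 x) and ceil(log16 x) for picking a subdivision depth. Skia's real versions use bit tricks on the float exponent; this portable form is illustrative:

    #include <algorithm>
    #include <cmath>

    static inline float root4(float x) { return std::sqrt(std::sqrt(x)); }

    // ceil(log2(x)), clamped so inputs <= 1 give 0.
    static inline int nextlog2(float x) {
        return (int)std::ceil(std::log2(std::max(x, 1.0f)));
    }

    // ceil(log4(x)) == ceil(ceil(log2(x)) / 2); the +1 rounds the halving up.
    static inline int nextlog4(float x)  { return (nextlog2(x) + 1) >> 1; }

    // ceil(log16(x)) == ceil(ceil(log2(x)) / 4).
    static inline int nextlog16(float x) { return (nextlog2(x) + 3) >> 2; }

    // e.g. nextlog4(20) == 3, since 4^2 = 16 < 20 <= 64 = 4^3.
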
|
D | Tessellation.h |
    36  #define AI SK_MAYBE_UNUSED SK_ALWAYS_INLINE  macro
    38  AI float dot(float2 a, float2 b) {  in dot()
    43  AI float cross(float2 a, float2 b) {  in cross()
    52  AI vec<N> mix(vec<N> a, vec<N> b, vec<N> T) {  in mix()
    58  AI vec<N> mix(vec<N> a, vec<N> b, float T) {  in mix()
    62  AI constexpr float pow2(float x) { return x*x; }  in pow2()
    63  AI constexpr float pow4(float x) { return pow2(x*x); }  in pow4()
    65  #undef AI
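
For reference, a scalar sketch of the 2-D helpers listed above (illustrative; Skia's versions operate on SIMD vec types): cross() is the z-component of the 3-D cross product, and mix() is the GLSL-style lerp.

    struct float2 { float x, y; };

    static inline float dot(float2 a, float2 b)   { return a.x*b.x + a.y*b.y; }

    // Scalar "2-D cross product": positive when b lies counterclockwise of a.
    static inline float cross(float2 a, float2 b) { return a.x*b.y - a.y*b.x; }

    // mix(a, b, 0) == a, mix(a, b, 1) == b, mix(a, b, 0.5) is the midpoint.
    static inline float2 mix(float2 a, float2 b, float t) {
        return { (b.x - a.x)*t + a.x, (b.y - a.y)*t + a.y };
    }
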
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | StackProtector.cpp |
    164  bool StackProtector::HasAddressTaken(const Instruction *AI) {  in HasAddressTaken()  argument
    165  for (const User *U : AI->users()) {  in HasAddressTaken()
    169  if (AI == cast<StoreInst>(I)->getValueOperand())  in HasAddressTaken()
    175  if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())  in HasAddressTaken()
    179  if (AI == cast<PtrToIntInst>(I)->getOperand(0))  in HasAddressTaken()
    282  if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {  in RequiresStackProtector()  local
    283  if (AI->isArrayAllocation()) {  in RequiresStackProtector()
    292  if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {  in RequiresStackProtector()
    296  Layout.insert(std::make_pair(AI,  in RequiresStackProtector()
    302  Layout.insert(std::make_pair(AI,  in RequiresStackProtector()
    [all …]
|
D | SafeStack.cpp |
    164  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
    220  uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {  in getStaticAllocaAllocationSize()  argument
    221  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());  in getStaticAllocaAllocationSize()
    222  if (AI->isArrayAllocation()) {  in getStaticAllocaAllocationSize()
    223  auto C = dyn_cast<ConstantInt>(AI->getArraySize());  in getStaticAllocaAllocationSize()
    385  if (auto AI = dyn_cast<AllocaInst>(&I)) {  in findInsts()  local
    388  uint64_t Size = getStaticAllocaAllocationSize(AI);  in findInsts()
    389  if (IsSafeStackAlloca(AI, Size))  in findInsts()
    392  if (AI->isStaticAlloca()) {  in findInsts()
    394  StaticAllocas.push_back(AI);  in findInsts()
    [all …]
|
D | RegUsageInfoCollector.cpp |
    149  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)  in runOnMachineFunction()  local
    150  SetRegAsDefined(*AI);  in runOnMachineFunction()
    162  for (MCRegAliasIterator AI(PReg, TRI, true); AI.isValid(); ++AI)  in runOnMachineFunction()  local
    163  if (!SavedRegs.test(*AI))  in runOnMachineFunction()
    164  SetRegAsDefined(*AI);  in runOnMachineFunction()
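
This file and the two anti-dependency breakers below share one idiom: whenever a physical register changes state, walk MCRegAliasIterator so every overlapping register (sub-, super-, and sibling registers) changes state with it. A self-contained toy model of that idiom, with a hand-written alias table standing in for TargetRegisterInfo (names and register set are illustrative):

    #include <bitset>
    #include <vector>

    enum Reg { RAX, EAX, AX, AH, AL, NUM_REGS };

    // kAliases[r]: r itself plus every register overlapping it, mirroring
    // MCRegAliasIterator(r, TRI, /*IncludeSelf=*/true). Note AH and AL share
    // super-registers but do not alias each other.
    static const std::vector<Reg> kAliases[NUM_REGS] = {
        /*RAX*/ {RAX, EAX, AX, AH, AL},
        /*EAX*/ {EAX, RAX, AX, AH, AL},
        /*AX*/  {AX,  RAX, EAX, AH, AL},
        /*AH*/  {AH,  RAX, EAX, AX},
        /*AL*/  {AL,  RAX, EAX, AX},
    };

    static void setRegAsDefined(Reg r, std::bitset<NUM_REGS>& defined) {
        for (Reg a : kAliases[r])   // the "for (MCRegAliasIterator AI...)" loop
            defined.set(a);
    }
    // Defining AL must mark AX/EAX/RAX defined too, or a later pass could
    // wrongly treat the wider registers as free.
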
|
D | AggressiveAntiDepBreaker.cpp |
    164  for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI) {  in StartBlock()  local
    165  unsigned Reg = *AI;  in StartBlock()
    182  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {  in StartBlock()  local
    183  unsigned AliasReg = *AI;  in StartBlock()
    314  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)  in HandleLastUse()  local
    315  if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI)) {  in HandleLastUse()
    397  for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {  in PrescanInstruction()  local
    398  unsigned AliasReg = *AI;  in PrescanInstruction()
    428  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {  in PrescanInstruction()  local
    435  if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI))  in PrescanInstruction()
    [all …]
|
D | AtomicExpandPass.cpp |
    80  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
    91  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
    93  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
    119  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
    489  AtomicRMWInst *AI =  in expandAtomicStore()  local
    495  return tryExpandAtomicRMW(AI);  in expandAtomicStore()
    565  bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {  in tryExpandAtomicRMW()  argument
    566  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {  in tryExpandAtomicRMW()
    571  unsigned ValueSize = getAtomicOpSize(AI);  in tryExpandAtomicRMW()
    577  return performAtomicOp(AI->getOperation(), Builder, Loaded,  in tryExpandAtomicRMW()
    [all …]
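
expandAtomicRMWToCmpXchg, referenced above, rewrites an atomicrmw the target cannot perform natively into a load plus compare-exchange retry loop. A self-contained sketch of the same shape in source form, here for a fetch-max (the pass itself emits LLVM IR; computing `desired` corresponds to the performAtomicOp call):

    #include <atomic>

    int atomic_fetch_max(std::atomic<int>& a, int v) {
        int loaded = a.load(std::memory_order_relaxed);
        for (;;) {
            int desired = loaded > v ? loaded : v;   // the performAtomicOp step
            // On failure, compare_exchange_weak refreshes 'loaded' with the
            // value it found — the role of the IR loop's phi — and we retry.
            if (a.compare_exchange_weak(loaded, desired,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed))
                return loaded;   // the value observed before the update
        }
    }
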
|
D | CriticalAntiDepBreaker.cpp |
    74  for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI) {  in StartBlock()  local
    75  unsigned Reg = *AI;  in StartBlock()
    92  for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {  in StartBlock()  local
    93  unsigned Reg = *AI;  in StartBlock()
    205  for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {  in PrescanInstruction()  local
    209  unsigned AliasReg = *AI;  in PrescanInstruction()
    335  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {  in ScanInstruction()  local
    336  unsigned AliasReg = *AI;  in ScanInstruction()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Utils/ |
D | PromoteMemoryToRegister.cpp |
    64  bool llvm::isAllocaPromotable(const AllocaInst *AI) {  in isAllocaPromotable()  argument
    67  unsigned AS = AI->getType()->getAddressSpace();  in isAllocaPromotable()
    70  for (const User *U : AI->users()) {  in isAllocaPromotable()
    77  if (SI->getOperand(0) == AI)  in isAllocaPromotable()
    129  void AnalyzeAlloca(AllocaInst *AI) {  in AnalyzeAlloca()
    135  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {  in AnalyzeAlloca()
    157  DbgDeclares = FindDbgAddrUses(AI);  in AnalyzeAlloca()
    291  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
    315  static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {  in removeLifetimeIntrinsicUsers()  argument
    319  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {  in removeLifetimeIntrinsicUsers()
    [all …]
|
D | MetaRenamer.cpp |
    96  for (auto AI = M.alias_begin(), AE = M.alias_end(); AI != AE; ++AI) {  in runOnModule()  local
    97  StringRef Name = AI->getName();  in runOnModule()
    101  AI->setName("alias");  in runOnModule()
    146  for (auto AI = F.arg_begin(), AE = F.arg_end(); AI != AE; ++AI)  in runOnFunction()  local
    147  if (!AI->getType()->isVoidTy())  in runOnFunction()
    148  AI->setName("arg");  in runOnFunction()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/InstCombine/ |
D | InstCombineLoadStoreAlloca.cpp |
    162  isOnlyCopiedFromConstantGlobal(AllocaInst *AI,  in isOnlyCopiedFromConstantGlobal()  argument
    165  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))  in isOnlyCopiedFromConstantGlobal()
    171  static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,  in isDereferenceableForAllocaSize()  argument
    173  if (AI->isArrayAllocation())  in isDereferenceableForAllocaSize()
    175  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());  in isDereferenceableForAllocaSize()
    178  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),  in isDereferenceableForAllocaSize()
    182  static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {  in simplifyAllocaArraySize()  argument
    184  if (!AI.isArrayAllocation()) {  in simplifyAllocaArraySize()
    186  if (AI.getArraySize()->getType()->isIntegerTy(32))  in simplifyAllocaArraySize()
    191  AI.setOperand(0, V);  in simplifyAllocaArraySize()
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Instrumentation/ |
D | HWAddressSanitizer.cpp |
    219  bool isInterestingAlloca(const AllocaInst &AI);
    220  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
    231  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
    759  static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {  in getAllocaSizeInBytes()  argument
    761  if (AI.isArrayAllocation()) {  in getAllocaSizeInBytes()
    762  const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());  in getAllocaSizeInBytes()
    766  Type *Ty = AI.getAllocatedType();  in getAllocaSizeInBytes()
    767  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);  in getAllocaSizeInBytes()
    771  bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,  in tagAlloca()  argument
    780  {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,  in tagAlloca()
    [all …]
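
The tagAlloca hits above are HWASan's stack side: each alloca gets a tag planted in the unused top byte of its address (AArch64 top-byte-ignore) and the same tag written into shadow memory, one shadow byte per 16-byte granule, so accesses through a mismatched pointer trap. A toy model of the two halves (constants and layout illustrative, not the runtime's real shadow mapping):

    #include <cstdint>

    constexpr int      kTagShift    = 56;  // tag occupies address bits 56..63
    constexpr uint64_t kGranuleSize = 16;  // one shadow byte per 16-byte granule

    // Plant the tag in the pointer's top byte; TBI makes the CPU ignore it
    // during address translation.
    static inline void* tagPointer(void* p, uint8_t tag) {
        uint64_t u = reinterpret_cast<uint64_t>(p) & ((1ULL << kTagShift) - 1);
        return reinterpret_cast<void*>(u | (uint64_t(tag) << kTagShift));
    }

    // Record the same tag for every granule the alloca covers, so a check of
    // (pointer tag == shadow tag) succeeds only through the tagged pointer.
    static inline void tagShadow(uint8_t* shadow, uint64_t addr,
                                 uint64_t size, uint8_t tag) {
        for (uint64_t off = 0; off < size; off += kGranuleSize)
            shadow[(addr + off) / kGranuleSize] = tag;
    }
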
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SIMachineFunctionInfo.cpp |
    430  yaml::SIArgumentInfo AI;  in convertArgumentInfo()  local
    453  Any |= convertArg(AI.PrivateSegmentBuffer, ArgInfo.PrivateSegmentBuffer);  in convertArgumentInfo()
    454  Any |= convertArg(AI.DispatchPtr, ArgInfo.DispatchPtr);  in convertArgumentInfo()
    455  Any |= convertArg(AI.QueuePtr, ArgInfo.QueuePtr);  in convertArgumentInfo()
    456  Any |= convertArg(AI.KernargSegmentPtr, ArgInfo.KernargSegmentPtr);  in convertArgumentInfo()
    457  Any |= convertArg(AI.DispatchID, ArgInfo.DispatchID);  in convertArgumentInfo()
    458  Any |= convertArg(AI.FlatScratchInit, ArgInfo.FlatScratchInit);  in convertArgumentInfo()
    459  Any |= convertArg(AI.PrivateSegmentSize, ArgInfo.PrivateSegmentSize);  in convertArgumentInfo()
    460  Any |= convertArg(AI.WorkGroupIDX, ArgInfo.WorkGroupIDX);  in convertArgumentInfo()
    461  Any |= convertArg(AI.WorkGroupIDY, ArgInfo.WorkGroupIDY);  in convertArgumentInfo()
    [all …]
|
D | SIMachineFunctionInfo.h |
    210  static void mapping(IO &YamlIO, SIArgumentInfo &AI) {
    211  YamlIO.mapOptional("privateSegmentBuffer", AI.PrivateSegmentBuffer);
    212  YamlIO.mapOptional("dispatchPtr", AI.DispatchPtr);
    213  YamlIO.mapOptional("queuePtr", AI.QueuePtr);
    214  YamlIO.mapOptional("kernargSegmentPtr", AI.KernargSegmentPtr);
    215  YamlIO.mapOptional("dispatchID", AI.DispatchID);
    216  YamlIO.mapOptional("flatScratchInit", AI.FlatScratchInit);
    217  YamlIO.mapOptional("privateSegmentSize", AI.PrivateSegmentSize);
    219  YamlIO.mapOptional("workGroupIDX", AI.WorkGroupIDX);
    220  YamlIO.mapOptional("workGroupIDY", AI.WorkGroupIDY);
    [all …]
|