Lines Matching refs:AI

19 AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {  in emulate_vrndmq_f32()
24 AI static float32x2_t emulate_vrndm_f32(float32x2_t v) { in emulate_vrndm_f32()
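(`AI` throughout is the header's always-inline macro.) The two `emulate_vrndm*` helpers at lines 19 and 24 exist because ARMv7 NEON has no round-toward-negative-infinity instruction; `vrndmq_f32` is ARMv8-only. A minimal sketch of the usual emulation, shown for the 4-lane case with a helper name of my own:

```cpp
#include <arm_neon.h>

// Sketch: floor() on ARMv7 NEON, where vrndmq_f32 is unavailable.
// Round-trip through int32 (truncation toward zero), then subtract 1 from
// any lane where truncation landed above the input (i.e. negative inputs).
static inline float32x4_t emulate_vrndmq_f32_sketch(float32x4_t v) {
    float32x4_t roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));  // truncate, convert back
    uint32x4_t  too_big   = vcgtq_f32(roundtrip, v);          // all-ones where roundtrip > v
    float32x4_t one       = vdupq_n_f32(1.0f);
    float32x4_t correction =
        vreinterpretq_f32_u32(vandq_u32(too_big, vreinterpretq_u32_f32(one)));
    return vsubq_f32(roundtrip, correction);
}
```

The round-trip is only meaningful while the value fits in int32, which is the standard caveat for this trick.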
33 AI SkNx(float32x2_t vec) : fVec(vec) {} in SkNx()
35 AI SkNx() {} in SkNx()
36 AI SkNx(float val) : fVec(vdup_n_f32(val)) {} in SkNx()
37 AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; } in SkNx()
39 AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); } in Load()
40 AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); } in store()
42 AI static void Load2(const void* ptr, SkNx* x, SkNx* y) { in Load2()
48 AI static void Store2(void* dst, const SkNx& a, const SkNx& b) { in Store2()
56 AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) { in Store3()
65 AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) { in Store4()
75 AI SkNx operator - () const { return vneg_f32(fVec); }
77 AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
78 AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
79 AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
80 AI SkNx operator / (const SkNx& o) const {
91 AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
92 AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
93 AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
94 AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
95 AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
96 AI SkNx operator!=(const SkNx& o) const {
100 AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); } in Min()
101 AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); } in Max()
103 AI SkNx abs() const { return vabs_f32(fVec); } in abs()
104 AI SkNx floor() const { in floor()
112 AI SkNx sqrt() const { in sqrt()
123 AI float operator[](int k) const {
129 AI bool allTrue() const { in allTrue()
137 AI bool anyTrue() const { in anyTrue()
146 AI SkNx thenElse(const SkNx& t, const SkNx& e) const { in thenElse()
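`thenElse` at line 146 is the vector select that goes with the comparisons at lines 91 to 95, whose results are per-lane all-ones/all-zeros masks stored as floats. A hedged sketch of how such a select is typically built on the 2-lane type (the helper name is mine):

```cpp
#include <arm_neon.h>

// Sketch: branch-free per-lane select for two float lanes.
// `mask` holds all-ones (true) or all-zeros (false) per lane, as produced by
// vclt_f32 / vcgt_f32 / etc.; vbsl takes bits from `t` where the mask is 1
// and from `e` where it is 0.
static inline float32x2_t then_else_sketch(float32x2_t mask, float32x2_t t, float32x2_t e) {
    return vbsl_f32(vreinterpret_u32_f32(mask), t, e);
}
```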
156 AI SkNx(float32x4_t vec) : fVec(vec) {} in SkNx()
158 AI SkNx() {} in SkNx()
159 AI SkNx(float val) : fVec(vdupq_n_f32(val)) {} in SkNx()
160 AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; } in SkNx()
162 AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); } in Load()
163 AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); } in store()
165 AI static void Load2(const void* ptr, SkNx* x, SkNx* y) { in Load2()
171 AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) { in Load4()
178 AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) { in Store4()
188 AI SkNx operator - () const { return vnegq_f32(fVec); }
190 AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
191 AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
192 AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
193 AI SkNx operator / (const SkNx& o) const {
204 AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
205 AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
206 AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
207 AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
208 AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
209 AI SkNx operator!=(const SkNx& o) const {
213 AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); } in Min()
214 AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); } in Max()
216 AI SkNx abs() const { return vabsq_f32(fVec); } in abs()
217 AI SkNx floor() const { in floor()
226 AI SkNx sqrt() const { in sqrt()
237 AI float operator[](int k) const {
243 AI float min() const { in min()
252 AI float max() const { in max()
261 AI bool allTrue() const { in allTrue()
270 AI bool anyTrue() const { in anyTrue()
280 AI SkNx thenElse(const SkNx& t, const SkNx& e) const { in thenElse()
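`operator/` (lines 80, 193) and `sqrt()` (lines 112, 226) open multi-line bodies because ARMv7 NEON has no vector divide or square-root instruction; the common pattern, which an aarch64 build can replace with `vdivq_f32`/`vsqrtq_f32`, is an estimate intrinsic refined by Newton-Raphson steps. A sketch of the division case (function name mine):

```cpp
#include <arm_neon.h>

// Sketch: a / b on ARMv7 NEON via reciprocal estimate plus two refinement steps.
static inline float32x4_t div_sketch(float32x4_t a, float32x4_t b) {
    float32x4_t est = vrecpeq_f32(b);             // coarse reciprocal estimate of b
    est = vmulq_f32(vrecpsq_f32(b, est), est);    // Newton-Raphson step 1
    est = vmulq_f32(vrecpsq_f32(b, est), est);    // Newton-Raphson step 2
    return vmulq_f32(a, est);                     // a * (1/b)
}
```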
288 AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) { in SkNx_fma()
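`SkNx_fma` at line 288 is the fused multiply-add entry point (f*m + a). A hedged sketch of the mapping, assuming an `__aarch64__` guard for the fused path (names mine):

```cpp
#include <arm_neon.h>

// Sketch: f*m + a across four float lanes.
static inline float32x4_t fma_sketch(float32x4_t f, float32x4_t m, float32x4_t a) {
#if defined(__aarch64__)
    return vfmaq_f32(a, f, m);   // fused: a + f*m with a single rounding
#else
    return vmlaq_f32(a, f, m);   // multiply-accumulate; may round twice
#endif
}
```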
299 AI SkNx(const uint16x4_t& vec) : fVec(vec) {} in SkNx()
301 AI SkNx() {} in SkNx()
302 AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {} in SkNx()
303 AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) { in SkNx()
307 AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); } in Load()
308 AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); } in store()
310 AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) { in Load4()
317 AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) { in Load3()
323 AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) { in Store4()
333 AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
334 AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
335 AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
336 AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
337 AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }
339 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
340 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
342 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); } in Min()
344 AI uint16_t operator[](int k) const {
350 AI SkNx thenElse(const SkNx& t, const SkNx& e) const { in thenElse()
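The `Load4`/`Store4`/`Load3` entries (lines 171, 178, 310, 317, 323) deinterleave pixels into planar channels and back, which NEON does with a single structured load/store. A sketch for the four-channel uint16 case (pointer and output names are mine):

```cpp
#include <arm_neon.h>

// Sketch: load 4 interleaved RGBA u16 pixels into 4 planar registers, and back.
static inline void load4_u16_sketch(const void* ptr,
                                    uint16x4_t* r, uint16x4_t* g,
                                    uint16x4_t* b, uint16x4_t* a) {
    uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);  // deinterleaves as it loads
    *r = rgba.val[0];
    *g = rgba.val[1];
    *b = rgba.val[2];
    *a = rgba.val[3];
}

static inline void store4_u16_sketch(void* dst,
                                     uint16x4_t r, uint16x4_t g,
                                     uint16x4_t b, uint16x4_t a) {
    uint16x4x4_t rgba = {{ r, g, b, a }};
    vst4_u16((uint16_t*)dst, rgba);                       // reinterleaves as it stores
}
```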
360 AI SkNx(const uint16x8_t& vec) : fVec(vec) {} in SkNx()
362 AI SkNx() {} in SkNx()
363 AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {} in SkNx()
364 AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); } in Load()
366 AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d, in SkNx()
371 AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); } in store()
373 AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
374 AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
375 AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
376 AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
377 AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }
379 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
380 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
382 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); } in Min()
384 AI uint16_t operator[](int k) const {
390 AI SkNx mulHi(const SkNx& m) const { in mulHi()
397 AI SkNx thenElse(const SkNx& t, const SkNx& e) const { in thenElse()
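`mulHi` at line 390 returns the upper 16 bits of a 16x16-bit multiply. The standard NEON recipe is to widen with `vmull` and narrow back with a 16-bit right shift; a sketch for eight lanes (helper name mine):

```cpp
#include <arm_neon.h>

// Sketch: per-lane (x * y) >> 16 for eight uint16 lanes.
static inline uint16x8_t mulhi_u16_sketch(uint16x8_t x, uint16x8_t y) {
    uint32x4_t lo = vmull_u16(vget_low_u16(x),  vget_low_u16(y));   // widen low 4 lanes
    uint32x4_t hi = vmull_u16(vget_high_u16(x), vget_high_u16(y));  // widen high 4 lanes
    // Shift each 32-bit product right by 16 and narrow back to 16 bits.
    return vcombine_u16(vshrn_n_u32(lo, 16), vshrn_n_u32(hi, 16));
}
```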
409 AI SkNx(const uint8x8_t& vec) : fVec(vec) {} in SkNx()
411 AI SkNx() {} in SkNx()
412 AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) { in SkNx()
415 AI static SkNx Load(const void* ptr) { in Load()
418 AI void store(void* ptr) const { in store()
421 AI uint8_t operator[](int k) const {
435 AI SkNx(const uint8x8_t& vec) : fVec(vec) {} in SkNx()
437 AI SkNx() {} in SkNx()
438 AI SkNx(uint8_t val) : fVec(vdup_n_u8(val)) {} in SkNx()
439 AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, in SkNx()
444 AI static SkNx Load(const void* ptr) { return vld1_u8((const uint8_t*)ptr); } in Load()
445 AI void store(void* ptr) const { vst1_u8((uint8_t*)ptr, fVec); } in store()
447 AI uint8_t operator[](int k) const {
459 AI SkNx(const uint8x16_t& vec) : fVec(vec) {} in SkNx()
461 AI SkNx() {} in SkNx()
462 AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {} in SkNx()
463 AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, in SkNx()
470 AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); } in Load()
471 AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); } in store()
473 AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); } in saturatedAdd()
475 AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
476 AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
477 AI SkNx operator & (const SkNx& o) const { return vandq_u8(fVec, o.fVec); }
479 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); } in Min()
480 AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }
482 AI uint8_t operator[](int k) const {
488 AI SkNx thenElse(const SkNx& t, const SkNx& e) const { in thenElse()
498 AI SkNx(const int32x4_t& vec) : fVec(vec) {} in SkNx()
500 AI SkNx() {} in SkNx()
501 AI SkNx(int32_t v) { in SkNx()
504 AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) { in SkNx()
507 AI static SkNx Load(const void* ptr) { in Load()
510 AI void store(void* ptr) const { in store()
513 AI int32_t operator[](int k) const {
519 AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
520 AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
521 AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
523 AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
524 AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
525 AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }
527 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
528 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
530 AI SkNx operator == (const SkNx& o) const {
533 AI SkNx operator < (const SkNx& o) const {
536 AI SkNx operator > (const SkNx& o) const {
540 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } in Min()
541 AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); } in Max()
544 AI SkNx thenElse(const SkNx& t, const SkNx& e) const { in thenElse()
548 AI SkNx abs() const { return vabsq_s32(fVec); } in abs()
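The Sk4i comparisons at lines 530 to 536 open blocks rather than being one-liners because the NEON compare intrinsics return unsigned masks (`uint32x4_t`), which have to be reinterpreted back to the signed element type the class stores. A sketch of the equality case (function name mine):

```cpp
#include <arm_neon.h>

// Sketch: lane-wise equality for int32x4, returned as an int32x4 mask
// (all-ones where equal, zero where not).
static inline int32x4_t eq_s32_sketch(int32x4_t a, int32x4_t b) {
    return vreinterpretq_s32_u32(vceqq_s32(a, b));
}
```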
556 AI SkNx(const uint32x4_t& vec) : fVec(vec) {} in SkNx()
558 AI SkNx() {} in SkNx()
559 AI SkNx(uint32_t v) { in SkNx()
562 AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) { in SkNx()
565 AI static SkNx Load(const void* ptr) { in Load()
568 AI void store(void* ptr) const { in store()
571 AI uint32_t operator[](int k) const {
577 AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
578 AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
579 AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }
581 AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
582 AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
583 AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }
585 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
586 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
588 AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
589 AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
590 AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }
592 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); } in Min()
595 AI SkNx mulHi(const SkNx& m) const { in mulHi()
602 AI SkNx thenElse(const SkNx& t, const SkNx& e) const { in thenElse()
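The shift operators (lines 339/340, 379/380, 527/528, 585/586) use the compiler's vector extensions (`fVec << SkNx(bits).fVec`) because NEON's immediate-shift intrinsics require a compile-time constant count. A sketch of the intrinsic alternative, `vshlq`, which shifts by a per-lane signed count and shifts right when the count is negative (names mine):

```cpp
#include <arm_neon.h>

// Sketch: run-time-variable shifts for four uint32 lanes via vshlq_u32,
// which takes a per-lane *signed* shift count (negative = logical right shift).
static inline uint32x4_t shl_u32_sketch(uint32x4_t v, int bits) {
    return vshlq_u32(v, vdupq_n_s32(bits));
}
static inline uint32x4_t shr_u32_sketch(uint32x4_t v, int bits) {
    return vshlq_u32(v, vdupq_n_s32(-bits));
}
```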
609 template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
613 template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
616 template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
620 template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
624 template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
628 template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
634 template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
639 template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
643 template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
647 template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
660 template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
669 template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
673 template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
677 template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
681 template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
685 template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
690 template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
695 template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
699 template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
703 template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
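The `SkNx_cast` specializations at lines 609 through 703 are built from a small set of moves: `vcvtq` for int/float conversion, `vmovl`/`vget_low` for widening, and `vqmovn`/`vmovn` for narrowing. A hedged sketch of the two ends of the longest chain, uint8 to float and float back to uint8 for four lanes (function names mine):

```cpp
#include <arm_neon.h>

// Sketch: widen 4 bytes to 4 floats (uint8 -> uint16 -> uint32 -> float).
static inline float32x4_t u8_to_f32_sketch(uint8x8_t b) {         // only lanes 0..3 used
    uint16x8_t w16 = vmovl_u8(b);                                  // u8  -> u16
    uint32x4_t w32 = vmovl_u16(vget_low_u16(w16));                 // u16 -> u32 (low 4)
    return vcvtq_f32_u32(w32);                                     // u32 -> float
}

// Sketch: narrow 4 floats to 4 bytes with saturation (float -> u32 -> u16 -> u8).
static inline uint8x8_t f32_to_u8_sketch(float32x4_t f) {
    uint32x4_t n32 = vcvtq_u32_f32(f);                             // truncating convert
    uint16x4_t n16 = vqmovn_u32(n32);                              // saturating narrow
    return vqmovn_u16(vcombine_u16(n16, n16));                     // saturating narrow again
}
```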
707 AI static Sk4i Sk4f_round(const Sk4f& x) { in Sk4f_round()
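`Sk4f_round` at line 707 converts floats to the nearest int32. ARMv8 has a round-to-nearest convert; a common ARMv7 workaround is to bias by plus or minus 0.5 before the truncating convert. A sketch of both paths (names mine; the ARMv7 branch assumes ties-to-even behavior is not required):

```cpp
#include <arm_neon.h>

// Sketch: round four floats to the nearest int32.
static inline int32x4_t round_sketch(float32x4_t x) {
#if defined(__aarch64__)
    return vcvtnq_s32_f32(x);                         // convert, rounding to nearest
#else
    // Build copysign(0.5, x), add it, then truncate toward zero.
    float32x4_t half = vreinterpretq_f32_u32(vorrq_u32(
        vandq_u32(vreinterpretq_u32_f32(x), vdupq_n_u32(0x80000000u)),   // sign of x
        vreinterpretq_u32_f32(vdupq_n_f32(0.5f))));                      // magnitude 0.5
    return vcvtq_s32_f32(vaddq_f32(x, half));
#endif
}
```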