
Lines Matching refs:fVec

33 AI SkNx(float32x2_t vec) : fVec(vec) {} in SkNx()
36 AI SkNx(float val) : fVec(vdup_n_f32(val)) {} in SkNx()
37 AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; } in SkNx()
40 AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); } in store()
50 a.fVec, in Store2()
51 b.fVec, in Store2()
58 a.fVec, in Store3()
59 b.fVec, in Store3()
60 c.fVec, in Store3()
67 a.fVec, in Store4()
68 b.fVec, in Store4()
69 c.fVec, in Store4()
70 d.fVec, in Store4()
76 float32x2_t est0 = vrecpe_f32(fVec), in invert()
77 est1 = vmul_f32(vrecps_f32(est0, fVec), est0); in invert()
81 AI SkNx operator - () const { return vneg_f32(fVec); }
83 AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
84 AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
85 AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
88 return vdiv_f32(fVec, o.fVec);
90 float32x2_t est0 = vrecpe_f32(o.fVec),
91 est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
92 est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
93 return vmul_f32(fVec, est2);
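invert() (lines 76-77) and the 32-bit branch of operator/ (lines 90-93) are the same trick at different precisions: start from the low-precision hardware estimate vrecpe_f32 and refine it with vrecps_f32 Newton-Raphson steps, one step for invert() and two steps plus a final multiply for division (AArch64 instead uses the real divide, vdiv_f32, on line 88). A minimal standalone sketch of the division fallback; the function name is illustrative, and this and the later sketches assume <arm_neon.h>:

    #include <arm_neon.h>

    // a / b ~= a * (1/b).  vrecps_f32(e, b) computes (2 - e*b), so
    // e * (2 - e*b) is one Newton-Raphson step toward 1/b; two steps take
    // the rough vrecpe_f32 estimate to roughly full float precision.
    static inline float32x2_t approx_divide(float32x2_t a, float32x2_t b) {
        float32x2_t est0 = vrecpe_f32(b);
        float32x2_t est1 = vmul_f32(vrecps_f32(est0, b), est0);
        float32x2_t est2 = vmul_f32(vrecps_f32(est1, b), est1);
        return vmul_f32(a, est2);
    }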
97 AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
98 AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
99 AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
100 AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
101 AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
103 return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
106 AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); } in Min()
107 AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); } in Max()
109 AI SkNx abs() const { return vabs_f32(fVec); } in abs()
112 return vrndm_f32(fVec); in floor()
114 return emulate_vrndm_f32(fVec); in floor()
119 float32x2_t est0 = vrsqrte_f32(fVec); in rsqrt()
120 return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0); in rsqrt()
125 return vsqrt_f32(fVec); in sqrt()
127 float32x2_t est0 = vrsqrte_f32(fVec), in sqrt()
128 est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0), in sqrt()
129 est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1); in sqrt()
130 return vmul_f32(fVec, est2); in sqrt()
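sqrt() follows the same pattern with the reciprocal-square-root pair: rsqrt() (lines 119-120) does one refinement step, while the 32-bit sqrt() fallback (lines 127-130) does two and then multiplies by the input, since x * (1/sqrt(x)) == sqrt(x); AArch64 has vsqrt_f32 (line 125). A hedged sketch of the fallback (illustrative name, <arm_neon.h> as above):

    // vrsqrts_f32(x, e*e) computes (3 - x*e*e) / 2, the Newton-Raphson
    // correction factor for 1/sqrt(x).
    static inline float32x2_t approx_sqrt(float32x2_t x) {
        float32x2_t est0 = vrsqrte_f32(x);
        float32x2_t est1 = vmul_f32(vrsqrts_f32(x, vmul_f32(est0, est0)), est0);
        float32x2_t est2 = vmul_f32(vrsqrts_f32(x, vmul_f32(est1, est1)), est1);
        return vmul_f32(x, est2);   // x * (1/sqrt(x)) == sqrt(x)
    }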
136 union { float32x2_t v; float fs[2]; } pun = {fVec};
142 return 0 != vminv_u32(vreinterpret_u32_f32(fVec)); in allTrue()
144 auto v = vreinterpret_u32_f32(fVec); in allTrue()
150 return 0 != vmaxv_u32(vreinterpret_u32_f32(fVec)); in anyTrue()
152 auto v = vreinterpret_u32_f32(fVec); in anyTrue()
158 return vbsl_f32(vreinterpret_u32_f32(fVec), t.fVec, e.fVec); in thenElse()
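The comparison operators (lines 97-103) do not return numbers: vclt_f32 and friends produce a per-lane mask of all-ones or all-zeros, reinterpreted as float only so it can travel in the same SkNx type. thenElse (line 158) reinterprets the mask back to uint32 and hands it to vbsl, a bitwise select; allTrue/anyTrue reduce the mask with the AArch64 horizontal vminv/vmaxv (lines 142, 150) or by inspecting lanes on 32-bit ARM. A small sketch of the compare-then-select pattern with raw intrinsics (illustrative name):

    // vclt_f32 yields 0xFFFFFFFF in each lane where a < b, else 0;
    // vbsl_f32 then picks bits from t where the mask is set, from e elsewhere.
    static inline float32x2_t select_lt(float32x2_t a, float32x2_t b,
                                        float32x2_t t, float32x2_t e) {
        uint32x2_t mask = vclt_f32(a, b);   // per-lane a < b
        return vbsl_f32(mask, t, e);        // mask ? t : e, bit by bit
    }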
161 float32x2_t fVec; variable
167 AI SkNx(float32x4_t vec) : fVec(vec) {} in SkNx()
170 AI SkNx(float val) : fVec(vdupq_n_f32(val)) {} in SkNx()
171 AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; } in SkNx()
174 AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); } in store()
191 r.fVec, in Store4()
192 g.fVec, in Store4()
193 b.fVec, in Store4()
194 a.fVec, in Store4()
200 float32x4_t est0 = vrecpeq_f32(fVec), in invert()
201 est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0); in invert()
205 AI SkNx operator - () const { return vnegq_f32(fVec); }
207 AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
208 AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
209 AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
212 return vdivq_f32(fVec, o.fVec);
214 float32x4_t est0 = vrecpeq_f32(o.fVec),
215 est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
216 est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
217 return vmulq_f32(fVec, est2);
221 AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
222 AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
223 AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
224 AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
225 AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
227 return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
230 AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); } in Min()
231 AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); } in Max()
233 AI SkNx abs() const { return vabsq_f32(fVec); } in abs()
236 return vrndmq_f32(fVec); in floor()
238 return emulate_vrndmq_f32(fVec); in floor()
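floor() uses the round-toward-minus-infinity instruction vrndm/vrndmq where available (lines 112, 236) and otherwise calls an emulate_vrndm helper (lines 114, 238) whose body is not part of this listing. A common way to emulate it, shown here only as a hedged sketch and not necessarily the exact helper, is to truncate through the int round trip and subtract one from the lanes where truncation overshot:

    // Emulated floor for values within int32 range: vcvtq_s32_f32 truncates
    // toward zero, so negative non-integers come back too large and need -1.
    static inline float32x4_t floor_emulated(float32x4_t v) {
        float32x4_t roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
        uint32x4_t  too_big   = vcgtq_f32(roundtrip, v);         // lanes needing fixup
        float32x4_t one_bits  = vreinterpretq_f32_u32(
                vandq_u32(too_big, vreinterpretq_u32_f32(vdupq_n_f32(1.0f))));
        return vsubq_f32(roundtrip, one_bits);
    }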
244 float32x4_t est0 = vrsqrteq_f32(fVec); in rsqrt()
245 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0); in rsqrt()
250 return vsqrtq_f32(fVec); in sqrt()
252 float32x4_t est0 = vrsqrteq_f32(fVec), in sqrt()
253 est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0), in sqrt()
254 est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); in sqrt()
255 return vmulq_f32(fVec, est2); in sqrt()
261 union { float32x4_t v; float fs[4]; } pun = {fVec};
267 return vminvq_f32(fVec); in min()
269 SkNx min = Min(*this, vrev64q_f32(fVec)); in min()
276 return vmaxvq_f32(fVec); in max()
278 SkNx max = Max(*this, vrev64q_f32(fVec)); in max()
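min() and max() are horizontal reductions: AArch64 has single-instruction vminvq_f32/vmaxvq_f32 (lines 267, 276), while the 32-bit path (lines 269, 278) folds the vector against a vrev64q_f32-swapped copy and finishes in scalar code not shown here. An equivalent intrinsic-only reduction using pairwise minimums, as a sketch rather than Skia's exact fallback:

    // Horizontal min of four lanes without vminvq_f32: vpmin_f32 takes the
    // minimum of adjacent pairs, so two rounds reduce 4 lanes to 1.
    static inline float hmin4(float32x4_t v) {
        float32x2_t m = vpmin_f32(vget_low_f32(v), vget_high_f32(v)); // {min(v0,v1), min(v2,v3)}
        m = vpmin_f32(m, m);                                          // both lanes now hold the min
        return vget_lane_f32(m, 0);
    }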
285 return 0 != vminvq_u32(vreinterpretq_u32_f32(fVec)); in allTrue()
287 auto v = vreinterpretq_u32_f32(fVec); in allTrue()
294 return 0 != vmaxvq_u32(vreinterpretq_u32_f32(fVec)); in anyTrue()
296 auto v = vreinterpretq_u32_f32(fVec); in anyTrue()
303 return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec); in thenElse()
306 float32x4_t fVec; variable
311 return vfmaq_f32(a.fVec, f.fVec, m.fVec); in SkNx_fma()
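SkNx_fma (line 311) maps straight onto the fused multiply-add instruction: vfmaq_f32(a, f, m) computes a + f*m with a single rounding. A tiny usage-style sketch (names are illustrative):

    // y = x * scale + bias across four lanes, fused (one rounding step).
    static inline float32x4_t scale_bias(float32x4_t x, float scale, float bias) {
        return vfmaq_f32(vdupq_n_f32(bias), x, vdupq_n_f32(scale));
    }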
321 AI SkNx(const uint16x4_t& vec) : fVec(vec) {} in SkNx()
324 AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {} in SkNx()
326 fVec = (uint16x4_t) { a,b,c,d }; in SkNx()
330 AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); } in store()
347 r.fVec, in Store4()
348 g.fVec, in Store4()
349 b.fVec, in Store4()
350 a.fVec, in Store4()
355 AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
356 AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
357 AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
358 AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
359 AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }
361 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
362 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
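The shift operators (lines 361-362, and the analogous ones on the 8-lane and 32-bit integer types) lean on the GCC/Clang vector extension that lets << and >> apply lane-wise to NEON vector types. With intrinsics only, the same thing is spelled with vshl, which shifts left by a signed per-lane count and shifts right when that count is negative; a sketch with illustrative names:

    static inline uint16x4_t shl_u16(uint16x4_t v, int bits) {
        return vshl_u16(v, vdup_n_s16((int16_t) bits));    // left shift by +bits
    }
    static inline uint16x4_t shr_u16(uint16x4_t v, int bits) {
        return vshl_u16(v, vdup_n_s16((int16_t)-bits));    // logical right shift by bits
    }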
364 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); } in Min()
368 union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
373 return vbsl_u16(fVec, t.fVec, e.fVec); in thenElse()
376 uint16x4_t fVec; variable
382 AI SkNx(const uint16x8_t& vec) : fVec(vec) {} in SkNx()
385 AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {} in SkNx()
390 fVec = (uint16x8_t) { a,b,c,d, e,f,g,h }; in SkNx()
393 AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); } in store()
395 AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
396 AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
397 AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
398 AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
399 AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }
401 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
402 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
404 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); } in Min()
408 union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
413 uint32x4_t hi = vmull_u16(vget_high_u16(fVec), vget_high_u16(m.fVec)); in mulHi()
414 uint32x4_t lo = vmull_u16( vget_low_u16(fVec), vget_low_u16(m.fVec)); in mulHi()
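mulHi (lines 413-414) wants the top 16 bits of each 16x16 -> 32 product. The listing shows the two widening vmull_u16 halves; the return that narrows them back is not in the listing, but the usual completion shifts each 32-bit product right by 16 and recombines, roughly as follows (a sketch, not necessarily the exact source):

    static inline uint16x8_t mul_hi_u16(uint16x8_t a, uint16x8_t b) {
        uint32x4_t hi = vmull_u16(vget_high_u16(a), vget_high_u16(b)); // widen top 4 lanes
        uint32x4_t lo = vmull_u16(vget_low_u16 (a), vget_low_u16 (b)); // widen bottom 4 lanes
        // Keep bits [31:16] of every product and narrow back to 16-bit lanes.
        return vcombine_u16(vshrn_n_u32(lo, 16), vshrn_n_u32(hi, 16));
    }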
420 return vbslq_u16(fVec, t.fVec, e.fVec); in thenElse()
423 uint16x8_t fVec; variable
431 AI SkNx(const uint8x8_t& vec) : fVec(vec) {} in SkNx()
435 fVec = (uint8x8_t){a,b,c,d, 0,0,0,0}; in SkNx()
441 return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0); in store()
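The 4-byte store (line 441) holds four uint8 lanes in the low half of a uint8x8_t, so writing them out means storing exactly four bytes: the vector is reinterpreted as two 32-bit lanes and lane 0 is stored. The unaligned_uint32_t cast in the source exists because ptr need not be 4-byte aligned. A sketch of the same move (illustrative name):

    // Write only the first four bytes of an 8-byte vector.  A strictly
    // correct version casts ptr through an alignment-1 uint32 typedef,
    // as the source line above does.
    static inline void store_4_bytes(void* ptr, uint8x8_t v) {
        vst1_lane_u32((uint32_t*) ptr, vreinterpret_u32_u8(v), 0);
    }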
445 union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
451 uint8x8_t fVec; variable
457 AI SkNx(const uint8x8_t& vec) : fVec(vec) {} in SkNx()
460 AI SkNx(uint8_t val) : fVec(vdup_n_u8(val)) {} in SkNx()
463 fVec = (uint8x8_t) { a,b,c,d, e,f,g,h }; in SkNx()
467 AI void store(void* ptr) const { vst1_u8((uint8_t*)ptr, fVec); } in store()
471 union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
475 uint8x8_t fVec; variable
481 AI SkNx(const uint8x16_t& vec) : fVec(vec) {} in SkNx()
484 AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {} in SkNx()
489 fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p }; in SkNx()
493 AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); } in store()
495 AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); } in saturatedAdd()
497 AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
498 AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
499 AI SkNx operator & (const SkNx& o) const { return vandq_u8(fVec, o.fVec); }
501 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); } in Min()
502 AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }
506 union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
511 return vbslq_u8(fVec, t.fVec, e.fVec); in thenElse()
514 uint8x16_t fVec; variable
520 AI SkNx(const int32x4_t& vec) : fVec(vec) {} in SkNx()
524 fVec = vdupq_n_s32(v); in SkNx()
527 fVec = (int32x4_t){a,b,c,d}; in SkNx()
533 return vst1q_s32((int32_t*)ptr, fVec); in store()
537 union { int32x4_t v; int32_t is[4]; } pun = {fVec};
541 AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
542 AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
543 AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
545 AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
546 AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
547 AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }
549 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
550 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
553 return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
556 return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
559 return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
562 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } in Min()
563 AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); } in Max()
567 return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec); in thenElse()
570 AI SkNx abs() const { return vabsq_s32(fVec); } in abs()
572 int32x4_t fVec; variable
578 AI SkNx(const uint32x4_t& vec) : fVec(vec) {} in SkNx()
582 fVec = vdupq_n_u32(v); in SkNx()
585 fVec = (uint32x4_t){a,b,c,d}; in SkNx()
591 return vst1q_u32((uint32_t*)ptr, fVec); in store()
595 union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
599 AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
600 AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
601 AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }
603 AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
604 AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
605 AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }
607 AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
608 AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
610 AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
611 AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
612 AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }
614 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); } in Min()
618 uint64x2_t hi = vmull_u32(vget_high_u32(fVec), vget_high_u32(m.fVec)); in mulHi()
619 uint64x2_t lo = vmull_u32( vget_low_u32(fVec), vget_low_u32(m.fVec)); in mulHi()
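mulHi for the 32-bit lanes (lines 618-619) is the same dance one size up: vmull_u32 widens to 64-bit products, and the high 32 bits of each are kept. A sketch of the likely completion (again, the narrowing return is not part of the listing):

    static inline uint32x4_t mul_hi_u32(uint32x4_t a, uint32x4_t b) {
        uint64x2_t hi = vmull_u32(vget_high_u32(a), vget_high_u32(b));
        uint64x2_t lo = vmull_u32(vget_low_u32 (a), vget_low_u32 (b));
        return vcombine_u32(vshrn_n_u64(lo, 32), vshrn_n_u64(hi, 32)); // keep bits [63:32]
    }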
625 return vbslq_u32(fVec, t.fVec, e.fVec); in thenElse()
628 uint32x4_t fVec; variable
632 return vcvtq_s32_f32(src.fVec);
636 return vcvtq_f32_s32(src.fVec);
643 return vqmovn_u32(vcvtq_u32_f32(src.fVec));
647 return vcvtq_f32_u32(vmovl_u16(src.fVec));
651 uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
657 uint16x8_t _16 = vmovl_u8(src.fVec);
662 return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
666 return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec);
676 return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
677 (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
678 vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
679 (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
685 uint16x4_t a16 = vqmovun_s32(a.fVec);
686 uint16x4_t b16 = vqmovun_s32(b.fVec);
692 return vget_low_u16(vmovl_u8(src.fVec));
696 return vmovl_u8(src.fVec);
700 return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
704 return vqmovn_u16(src.fVec);
708 uint16x4_t _16 = vqmovun_s32(src.fVec);
713 uint16x4_t _16 = vqmovn_u32(src.fVec);
718 return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
722 return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
726 return vreinterpretq_s32_u32(src.fVec);
730 return vcvtq_s32_f32((x + 0.5f).fVec); in Sk4f_round()
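Sk4f_round (line 730) rounds by adding 0.5 and converting with vcvtq_s32_f32, which truncates toward zero, so the result matches conventional round-half-up only for non-negative lanes (the common case for pixel math). As a standalone sketch:

    static inline int32x4_t round_to_int(float32x4_t x) {
        // Bias then truncate toward zero; correct rounding for x >= 0.
        return vcvtq_s32_f32(vaddq_f32(x, vdupq_n_f32(0.5f)));
    }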