Searched refs:vabsx (Results 1 – 25 of 43) sorted by relevance

/external/XNNPACK/src/f32-vrnd/gen/
vrndz-wasmsimd-addsub-x4.c
35 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4() local
36 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
37 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
38 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
48 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4() local
49 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
50 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
51 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
vrndne-wasmsimd-addsub-x4.c
34 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4() local
35 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4()
36 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4()
45 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4() local
46 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4()
47 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4()
vrndz-wasmsimd-addsub-x8.c
62 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8() local
63 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
64 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
65 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
75 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8() local
76 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
77 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
78 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
vrndd-wasmsimd-addsub-x4.c
35 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4() local
36 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4()
37 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4()
47 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4() local
48 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4()
49 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4()
vrndu-wasmsimd-addsub-x4.c
35 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4() local
36 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4()
37 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4()
49 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4() local
50 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4()
51 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4()
vrndne-wasmsimd-addsub-x8.c
55 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8() local
56 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8()
57 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8()
66 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8() local
67 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8()
68 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8()
vrndd-wasmsimd-addsub-x8.c
59 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8() local
60 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8()
61 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8()
71 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8() local
72 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8()
73 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8()
vrndu-wasmsimd-addsub-x8.c
65 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8() local
66 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8()
67 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8()
79 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8() local
80 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx)); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8()
81 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8()
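Note on the addsub kernels above: wasm_v128_andnot(vx, vsign_mask) computes vx & ~vsign_mask, i.e. |x|, and adding then subtracting the magic number 2^23 pushes the fraction bits out of the significand, so the float pipeline (WebAssembly mandates round-to-nearest-even) yields |x| rounded to an integer. Lanes with |x| >= 2^23 are already integral and take the input bits verbatim, which also preserves the sign of -0.0f. A minimal self-contained sketch of the truncating (vrndz) variant assembled from the matched lines; the wrapper name rndz_addsub is illustrative, and it assumes clang targeting wasm32 with -msimd128:

  #include <wasm_simd128.h>

  v128_t rndz_addsub(v128_t vx) {
    const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
    const v128_t vmagic_number = wasm_f32x4_const_splat(0x1.000000p+23f);  /* 2^23 */
    const v128_t vone = wasm_f32x4_const_splat(1.0f);
    /* Clear the sign bit: vx & ~vsign_mask == |x|. */
    const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask);
    /* Take input bits in the sign position and wherever |x| >= 2^23. */
    const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
    /* Add/subtract 2^23: |x| rounded to the nearest (even) integer. */
    const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
    /* Nearest-even may overshoot |x|; subtract 1.0f there to truncate. */
    const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone);
    const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment);
    /* bitselect(a, b, m) = (a & m) | (b & ~m). */
    return wasm_v128_bitselect(vx, vflrabsx, vrndmask);
  }
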
vrndz-wasmsimd-cvt-x4.c
35 const v128_t vabsx = wasm_f32x4_abs(vx); in xnn_f32_vrndz_ukernel__wasmsimd_cvt_x4() local
37 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask); in xnn_f32_vrndz_ukernel__wasmsimd_cvt_x4()
47 const v128_t vabsx = wasm_f32x4_abs(vx); in xnn_f32_vrndz_ukernel__wasmsimd_cvt_x4() local
49 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask); in xnn_f32_vrndz_ukernel__wasmsimd_cvt_x4()
vrndd-wasmsimd-cvt-x4.c
36 const v128_t vabsx = wasm_f32x4_abs(vx); in xnn_f32_vrndd_ukernel__wasmsimd_cvt_x4() local
38 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask); in xnn_f32_vrndd_ukernel__wasmsimd_cvt_x4()
50 const v128_t vabsx = wasm_f32x4_abs(vx); in xnn_f32_vrndd_ukernel__wasmsimd_cvt_x4() local
52 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask); in xnn_f32_vrndd_ukernel__wasmsimd_cvt_x4()
vrndu-wasmsimd-cvt-x4.c
36 const v128_t vabsx = wasm_f32x4_abs(vx); in xnn_f32_vrndu_ukernel__wasmsimd_cvt_x4() local
38 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask); in xnn_f32_vrndu_ukernel__wasmsimd_cvt_x4()
51 const v128_t vabsx = wasm_f32x4_abs(vx); in xnn_f32_vrndu_ukernel__wasmsimd_cvt_x4() local
53 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask); in xnn_f32_vrndu_ukernel__wasmsimd_cvt_x4()
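The cvt kernels above truncate via an int32 round-trip instead: the float-to-int conversion truncates toward zero, and vrndmask keeps the converted bits only where |x| < 2^23 (larger magnitudes are already integral) while always taking the sign bit from the input, preserving -0.0f. A sketch under the same toolchain assumptions; the wrapper name rndz_cvt is illustrative, and note that current wasm_simd128.h spells the conversion wasm_i32x4_trunc_sat_f32x4 (older headers used wasm_i32x4_trunc_saturate_f32x4):

  #include <wasm_simd128.h>

  v128_t rndz_cvt(v128_t vx) {
    const v128_t vsign_mask = wasm_f32x4_const_splat(-0.0f);
    const v128_t vmagic_number = wasm_f32x4_const_splat(0x1.000000p+23f);  /* 2^23 */
    const v128_t vabsx = wasm_f32x4_abs(vx);
    /* int32 round-trip truncates toward zero; only used where |x| < 2^23,
       well inside the int32 range. */
    const v128_t vintx = wasm_i32x4_trunc_sat_f32x4(vx);
    const v128_t vprerndx = wasm_f32x4_convert_i32x4(vintx);
    /* Converted bits where |x| < 2^23, except the sign bit (keeps -0.0f). */
    const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask);
    return wasm_v128_bitselect(vprerndx, vx, vrndmask);
  }
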
/external/XNNPACK/src/f32-vrnd/
vrndz-wasmsimd-addsub.c.in
38 const v128_t vabsx${ABC[N:N+4]} = wasm_v128_andnot(vx${ABC[N:N+4]}, vsign_mask);
41 …vrndmask${ABC[N:N+4]} = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx${ABC[N:N+4]}));
44 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
47 …const v128_t vadjustment${ABC[N:N+4]} = wasm_v128_and(wasm_f32x4_lt(vabsx${ABC[N:N+4]}, vrndabsx${…
64 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
65 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
66 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
67 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone);
77 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
78 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
[all …]
vrndne-wasmsimd-addsub.c.in
37 const v128_t vabsx${ABC[N:N+4]} = wasm_v128_andnot(vx${ABC[N:N+4]}, vsign_mask);
40 …vrndmask${ABC[N:N+4]} = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx${ABC[N:N+4]}));
43 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
57 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
58 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
59 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
68 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
69 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
70 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
vrndd-wasmsimd-addsub.c.in
38 const v128_t vabsx${ABC[N:N+4]} = wasm_v128_andnot(vx${ABC[N:N+4]}, vsign_mask);
41 …vrndmask${ABC[N:N+4]} = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx${ABC[N:N+4]}));
44 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
61 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
62 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
63 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
73 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
74 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
75 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
vrndu-wasmsimd-addsub.c.in
38 const v128_t vabsx${ABC[N:N+4]} = wasm_v128_andnot(vx${ABC[N:N+4]}, vsign_mask);
41 …vrndmask${ABC[N:N+4]} = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx${ABC[N:N+4]}));
44 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
67 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
68 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
69 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
81 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); variable
82 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
83 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
vrndz-wasmsimd-cvt.c.in
38 const v128_t vabsx${ABC[N:N+4]} = wasm_f32x4_abs(vx${ABC[N:N+4]});
42 …const v128_t vrndmask${ABC[N:N+4]} = wasm_v128_andnot(wasm_f32x4_lt(vabsx${ABC[N:N+4]}, vmagic_num…
57 const v128_t vabsx = wasm_f32x4_abs(vx); variable
59 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask);
69 const v128_t vabsx = wasm_f32x4_abs(vx); variable
71 const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_number), vsign_mask);
vrndne-neon.c.in
33 const float32x4_t vabsx${ABC[N:N+4]} = vabsq_f32(vx${ABC[N:N+4]});
37 float32x4_t vrndabsx${ABC[N:N+4]} = vaddq_f32(vabsx${ABC[N:N+4]}, vmagic_number);
54 const float32x4_t vabsx = vabsq_f32(vx); variable
56 float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number);
64 const float32x4_t vabsx = vabsq_f32(vx); variable
66 float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number);
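The vrndne-neon.c.in template applies the same magic-number trick with NEON intrinsics. Only the abs and add steps appear in the matches above, so the rest of this sketch (the subtract, the |x| < 2^23 select, and the sign restore) is a plausible reconstruction rather than the template's exact code; the function name rndne_neon is illustrative, and the result assumes the default FPCR round-to-nearest-even mode:

  #include <arm_neon.h>
  #include <stdint.h>

  float32x4_t rndne_neon(float32x4_t vx) {
    const float32x4_t vmagic_number = vdupq_n_f32(0x1.000000p+23f);  /* 2^23 */
    const float32x4_t vabsx = vabsq_f32(vx);
    /* Add/subtract 2^23 rounds |x| to the nearest integer in the current
       (assumed nearest-even) rounding mode. */
    const float32x4_t vrndabsx =
        vsubq_f32(vaddq_f32(vabsx, vmagic_number), vmagic_number);
    /* Lanes with |x| >= 2^23 are already integral, and the addsub could
       perturb them, so keep |x| there. */
    const uint32x4_t vrndmask = vcaltq_f32(vx, vmagic_number);  /* |x| < 2^23 */
    const float32x4_t vrndx = vbslq_f32(vrndmask, vrndabsx, vabsx);
    /* Reattach the input sign bit (handles negatives and -0.0f). */
    return vbslq_f32(vdupq_n_u32(UINT32_C(0x80000000)), vx, vrndx);
  }
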
/external/XNNPACK/src/math/
roundz-scalar-addsub.c
35 const float vabsx = fabsf(vx); in xnn_math_f32_roundz__scalar_addsub() local
39 const float vrndabsx = (vabsx + vmagic_number) - vmagic_number; in xnn_math_f32_roundz__scalar_addsub()
43 const float vflrabsx = XNN_UNPREDICTABLE(vrndabsx <= vabsx) ? vrndabsx : vrndabsx - vone; in xnn_math_f32_roundz__scalar_addsub()
49 const float vabsy = XNN_UNPREDICTABLE(vabsx >= vmagic_number) ? vabsx : vflrabsx; in xnn_math_f32_roundz__scalar_addsub()
roundne-scalar-addsub.c
32 const float vabsx = fabsf(vx); in xnn_math_f32_roundne__scalar_addsub() local
36 const float vrndabsx = (vabsx + vmagic_number) - vmagic_number; in xnn_math_f32_roundne__scalar_addsub()
42 const float vabsy = XNN_UNPREDICTABLE(vabsx >= vmagic_number) ? vabsx : vrndabsx; in xnn_math_f32_roundne__scalar_addsub()
roundd-scalar-addsub.c
34 const float vabsx = fabsf(vx); in xnn_math_f32_roundd__scalar_addsub() local
38 const float vprerndabsx = (vabsx + vmagic_number) - vmagic_number; in xnn_math_f32_roundd__scalar_addsub()
44 const float vrndabsx = XNN_UNPREDICTABLE(vabsx >= vmagic_number) ? vabsx : vprerndabsx; in xnn_math_f32_roundd__scalar_addsub()
roundu-scalar-addsub.c
34 const float vabsx = fabsf(vx); in xnn_math_f32_roundu__scalar_addsub() local
38 const float vprerndabsx = (vabsx + vmagic_number) - vmagic_number; in xnn_math_f32_roundu__scalar_addsub()
44 const float vrndabsx = XNN_UNPREDICTABLE(vabsx >= vmagic_number) ? vabsx : vprerndabsx; in xnn_math_f32_roundu__scalar_addsub()
roundz-wasmsimd-addsub.c
39 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_math_f32_roundz__wasmsimd_addsub() local
44 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_ge(vabsx, vmagic_number)); in xnn_math_f32_roundz__wasmsimd_addsub()
48 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundz__wasmsimd_addsub()
52 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_gt(vrndabsx, vabsx), vone); in xnn_math_f32_roundz__wasmsimd_addsub()
roundz-sse-addsub.c
39 const __m128 vabsx = _mm_and_ps(vx, vnonsign_mask); in xnn_math_f32_roundz__sse_addsub() local
44 const __m128 vrndmask = _mm_andnot_ps(_mm_cmpge_ps(vabsx, vmagic_number), vnonsign_mask); in xnn_math_f32_roundz__sse_addsub()
48 const __m128 vrndabsx = _mm_sub_ps(_mm_add_ps(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundz__sse_addsub()
52 const __m128 vadjustment = _mm_and_ps(vone, _mm_cmpgt_ps(vrndabsx, vabsx)); in xnn_math_f32_roundz__sse_addsub()
roundne-wasmsimd-addsub.c
36 const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask); in xnn_math_f32_roundne__wasmsimd_addsub() local
41 const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_gt(vabsx, vmagic_number)); in xnn_math_f32_roundne__wasmsimd_addsub()
45 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundne__wasmsimd_addsub()
roundne-sse-addsub.c
36 const __m128 vabsx = _mm_and_ps(vx, vnonsign_mask); in xnn_math_f32_roundne__sse_addsub() local
41 const __m128 vrndmask = _mm_andnot_ps(_mm_cmpge_ps(vabsx, vmagic_number), vnonsign_mask); in xnn_math_f32_roundne__sse_addsub()
45 const __m128 vrndabsx = _mm_sub_ps(_mm_add_ps(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundne__sse_addsub()
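The SSE variants above differ only in masking convention: _mm_and_ps with a non-sign mask instead of andnot with a sign mask, and note that _mm_andnot_ps(a, b) computes ~a & b, the reverse argument order of wasm_v128_andnot. A sketch of the roundne step; the function name roundne_sse_addsub is illustrative, the final bit-select line is not among the matches and is reconstructed here, and default MXCSR rounding (nearest-even) is assumed:

  #include <emmintrin.h>

  __m128 roundne_sse_addsub(__m128 vx) {
    /* All bits except the sign bit. */
    const __m128 vnonsign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF));
    const __m128 vmagic_number = _mm_set1_ps(0x1.000000p+23f);  /* 2^23 */
    const __m128 vabsx = _mm_and_ps(vx, vnonsign_mask);
    /* Set in the non-sign bits of lanes with |x| < 2^23; zero elsewhere. */
    const __m128 vrndmask =
        _mm_andnot_ps(_mm_cmpge_ps(vabsx, vmagic_number), vnonsign_mask);
    /* Add/subtract 2^23 rounds |x| to the nearest (even) integer. */
    const __m128 vrndabsx = _mm_sub_ps(_mm_add_ps(vabsx, vmagic_number), vmagic_number);
    /* Rounded magnitude where the mask is set, input bits (incl. sign) elsewhere. */
    return _mm_or_ps(_mm_and_ps(vrndabsx, vrndmask), _mm_andnot_ps(vrndmask, vx));
  }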
