Searched refs:vrndabsx (Results 1 – 25 of 31) sorted by relevance

/external/XNNPACK/src/f32-vrnd/
vrndne-neon.c.in
37 float32x4_t vrndabsx${ABC[N:N+4]} = vaddq_f32(vabsx${ABC[N:N+4]}, vmagic_number);
43 vrndabsx${ABC[N:N+4]} = vsubq_f32(vrndabsx${ABC[N:N+4]}, vmagic_number);
46 …t32x4_t vy${ABC[N:N+4]} = vbslq_f32(vrndmask${ABC[N:N+4]}, vx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]});
56 float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number); variable
58 vrndabsx = vsubq_f32(vrndabsx, vmagic_number);
59 const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx);
66 float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number); variable
68 vrndabsx = vsubq_f32(vrndabsx, vmagic_number);
69 const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx);
vrndz-wasmsimd-addsub.c.in
44 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
47 …justment${ABC[N:N+4]} = wasm_v128_and(wasm_f32x4_lt(vabsx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]}), vo…
50 …const v128_t vflrabsx${ABC[N:N+4]} = wasm_f32x4_sub(vrndabsx${ABC[N:N+4]}, vadjustment${ABC[N:N+4]…
66 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
67 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone);
68 const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment);
79 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
80 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone);
81 const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment);
vrndne-wasmsimd-addsub.c.in
43 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
46 …const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(vx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]}, vrndmas…
59 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
60 const v128_t vy = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
70 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
71 v128_t vy = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
vrndd-wasmsimd-addsub.c.in
44 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
47 …const v128_t vrndx${ABC[N:N+4]} = wasm_v128_bitselect(vx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]}, vrnd…
63 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
64 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
75 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
76 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
vrndu-wasmsimd-addsub.c.in
44 …const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_numb…
47 …const v128_t vrndx${ABC[N:N+4]} = wasm_v128_bitselect(vx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]}, vrnd…
69 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
70 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
83 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); variable
84 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
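Every template above starts from the same magic-number add/sub step: adding and then subtracting 2**23 makes the FPU round |x| to the nearest integer (ties to even), which is exactly the value stored in vrndabsx. A minimal scalar sketch of the round-to-nearest-even (vrndne) case, using the same variable names; this is an illustration only, not XNNPACK code, and the final sign/magnitude handling is an assumption based on the scalar references in src/math/ further below:

#include <math.h>

/* Hypothetical scalar sketch of the add/sub round-to-nearest-even trick
 * generated by the vrndne templates above. Assumes the default
 * round-to-nearest FP rounding mode. */
static float roundne_addsub_sketch(float x) {
  const float vmagic_number = 0x1.000000p+23f;  /* 2**23: at or above this, every float is integral */
  const float vabsx = fabsf(x);
  /* Adding then subtracting 2**23 forces |x| to be rounded to the nearest integer. */
  const float vrndabsx = (vabsx + vmagic_number) - vmagic_number;
  /* |x| >= 2**23 is already integral and is passed through unchanged; the SIMD
   * kernels do this select with vrndmask + vbslq_f32 / wasm_v128_bitselect.
   * NaN simply propagates through the arithmetic path. */
  return (vabsx >= vmagic_number) ? x : copysignf(vrndabsx, x);
}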
/external/XNNPACK/src/f32-vrnd/gen/
vrndne-neon-x8.c
57 float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number); in xnn_f32_vrndne_ukernel__neon_x8() local
59 vrndabsx = vsubq_f32(vrndabsx, vmagic_number); in xnn_f32_vrndne_ukernel__neon_x8()
60 const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx); in xnn_f32_vrndne_ukernel__neon_x8()
67 float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number); in xnn_f32_vrndne_ukernel__neon_x8() local
69 vrndabsx = vsubq_f32(vrndabsx, vmagic_number); in xnn_f32_vrndne_ukernel__neon_x8()
70 const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx); in xnn_f32_vrndne_ukernel__neon_x8()
vrndz-wasmsimd-addsub-x4.c
37 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4() local
38 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
39 const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
50 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4() local
51 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
52 const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x4()
vrndz-wasmsimd-addsub-x8.c
64 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8() local
65 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
66 const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
77 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8() local
78 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_lt(vabsx, vrndabsx), vone); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
79 const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment); in xnn_f32_vrndz_ukernel__wasmsimd_addsub_x8()
vrndne-wasmsimd-addsub-x4.c
36 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4() local
37 const v128_t vy = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4()
47 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4() local
48 v128_t vy = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x4()
vrndne-neon-x4.c
49 float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number); in xnn_f32_vrndne_ukernel__neon_x4() local
51 vrndabsx = vsubq_f32(vrndabsx, vmagic_number); in xnn_f32_vrndne_ukernel__neon_x4()
52 const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx); in xnn_f32_vrndne_ukernel__neon_x4()
vrndd-wasmsimd-addsub-x4.c
37 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4() local
38 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4()
49 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4() local
50 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x4()
vrndu-wasmsimd-addsub-x4.c
37 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4() local
38 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4()
51 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4() local
52 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x4()
vrndne-wasmsimd-addsub-x8.c
57 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8() local
58 const v128_t vy = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8()
68 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8() local
69 v128_t vy = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndne_ukernel__wasmsimd_addsub_x8()
vrndd-wasmsimd-addsub-x8.c
61 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8() local
62 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8()
73 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8() local
74 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndd_ukernel__wasmsimd_addsub_x8()
vrndu-wasmsimd-addsub-x8.c
67 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8() local
68 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8()
81 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8() local
82 const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_f32_vrndu_ukernel__wasmsimd_addsub_x8()
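The generated vrndz kernels above derive truncation from the same rounded magnitude: whenever the add/sub rounding moved |x| up, one is subtracted back. A scalar sketch of that adjustment follows; the helper is hypothetical, and the final sign restore and the |x| >= 2**23 guard are not visible in these snippets but follow the scalar references in src/math/ below:

#include <math.h>

/* Hypothetical scalar sketch of the vrndz (round-toward-zero) adjustment. */
static float roundz_addsub_sketch(float x) {
  const float vmagic_number = 0x1.000000p+23f;                     /* 2**23 */
  const float vabsx = fabsf(x);
  const float vrndabsx = (vabsx + vmagic_number) - vmagic_number;  /* |x| rounded to nearest */
  /* If rounding went up past |x|, pull the result back down by one. */
  const float vadjustment = (vabsx < vrndabsx) ? 1.0f : 0.0f;
  const float vflrabsx = vrndabsx - vadjustment;
  /* Large-magnitude inputs are already integral; otherwise restore the sign of x. */
  return (vabsx >= vmagic_number) ? x : copysignf(vflrabsx, x);
}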
/external/XNNPACK/src/math/
roundz-scalar-addsub.c
39 const float vrndabsx = (vabsx + vmagic_number) - vmagic_number; in xnn_math_f32_roundz__scalar_addsub() local
43 const float vflrabsx = XNN_UNPREDICTABLE(vrndabsx <= vabsx) ? vrndabsx : vrndabsx - vone; in xnn_math_f32_roundz__scalar_addsub()
roundz-neon-addsub.c
48 const float32x4_t vrndabsx = vsubq_f32(vaddq_f32(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundz__neon_addsub() local
52 …const float32x4_t vadjustment = vreinterpretq_f32_u32(vandq_u32(vone, vcgtq_f32(vrndabsx, vabsx))); in xnn_math_f32_roundz__neon_addsub()
55 const float32x4_t vflrabsx = vsubq_f32(vrndabsx, vadjustment); in xnn_math_f32_roundz__neon_addsub()
roundz-wasmsimd-addsub.c
48 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundz__wasmsimd_addsub() local
52 const v128_t vadjustment = wasm_v128_and(wasm_f32x4_gt(vrndabsx, vabsx), vone); in xnn_math_f32_roundz__wasmsimd_addsub()
55 const v128_t vflrabsx = wasm_f32x4_sub(vrndabsx, vadjustment); in xnn_math_f32_roundz__wasmsimd_addsub()
roundz-sse-addsub.c
48 const __m128 vrndabsx = _mm_sub_ps(_mm_add_ps(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundz__sse_addsub() local
52 const __m128 vadjustment = _mm_and_ps(vone, _mm_cmpgt_ps(vrndabsx, vabsx)); in xnn_math_f32_roundz__sse_addsub()
55 const __m128 vflrabsx = _mm_sub_ps(vrndabsx, vadjustment); in xnn_math_f32_roundz__sse_addsub()
roundne-scalar-addsub.c
36 const float vrndabsx = (vabsx + vmagic_number) - vmagic_number; in xnn_math_f32_roundne__scalar_addsub() local
42 const float vabsy = XNN_UNPREDICTABLE(vabsx >= vmagic_number) ? vabsx : vrndabsx; in xnn_math_f32_roundne__scalar_addsub()
roundd-scalar-addsub.c
44 const float vrndabsx = XNN_UNPREDICTABLE(vabsx >= vmagic_number) ? vabsx : vprerndabsx; in xnn_math_f32_roundd__scalar_addsub() local
46 const float vrndx = copysignf(vrndabsx, vx); in xnn_math_f32_roundd__scalar_addsub()
roundu-scalar-addsub.c
44 const float vrndabsx = XNN_UNPREDICTABLE(vabsx >= vmagic_number) ? vabsx : vprerndabsx; in xnn_math_f32_roundu__scalar_addsub() local
46 const float vrndx = copysignf(vrndabsx, vx); in xnn_math_f32_roundu__scalar_addsub()
roundne-neon-addsub.c
45 const float32x4_t vrndabsx = vsubq_f32(vaddq_f32(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundne__neon_addsub() local
51 const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx); in xnn_math_f32_roundne__neon_addsub()
roundne-wasmsimd-addsub.c
45 const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundne__wasmsimd_addsub() local
51 const v128_t vy = wasm_v128_bitselect(vx, vrndabsx, vrndmask); in xnn_math_f32_roundne__wasmsimd_addsub()
roundne-sse-addsub.c
45 const __m128 vrndabsx = _mm_sub_ps(_mm_add_ps(vabsx, vmagic_number), vmagic_number); in xnn_math_f32_roundne__sse_addsub() local
51 const __m128 vy = _mm_or_ps(_mm_and_ps(vrndabsx, vrndmask), _mm_andnot_ps(vrndmask, vx)); in xnn_math_f32_roundne__sse_addsub()
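The scalar references in src/math/ restore the sign with copysignf and handle floor/ceiling (roundd/roundu) by stepping the nearest-integer result by one when it overshoots in the wrong direction. A sketch of the floor variant under those assumptions; the helper is hypothetical, and only the vrndabsx select and the copysignf call are visible in the roundd snippet above:

#include <math.h>

/* Hypothetical scalar sketch of the roundd (floor) add/sub variant. */
static float roundd_addsub_sketch(float x) {
  const float vmagic_number = 0x1.000000p+23f;  /* 2**23 */
  const float vabsx = fabsf(x);
  const float vprerndabsx = (vabsx + vmagic_number) - vmagic_number;
  /* |x| >= 2**23 is already integral; otherwise take the rounded magnitude. */
  const float vrndabsx = (vabsx >= vmagic_number) ? vabsx : vprerndabsx;
  const float vrndx = copysignf(vrndabsx, x);
  /* Rounding to nearest may land above x; step down one to reach the floor.
   * The ceiling variant (roundu) mirrors this with (vrndx < x) ? vrndx + 1.0f : vrndx. */
  return (vrndx > x) ? vrndx - 1.0f : vrndx;
}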
