/external/XNNPACK/src/f32-vlrelu/
sse.c.in
    34  __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
    36  __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    38  const __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
    40  const __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    45  __m128 vacc${ABC[N:N+4]} = _mm_max_ps(_mm_setzero_ps(), vx${ABC[N:N+4]});
    46  vx${ABC[N:N+4]} = _mm_min_ps(vx${ABC[N:N+4]}, vzero);
    48  __m128 vacc${ABC[N:N+4]} = _mm_mul_ps(vx${ABC[N:N+4]}, vslope);
    50  …:N+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx${ABC[N:N+4]})));
    54  vacc${ABC[N:N+4]} = _mm_add_ps(vacc${ABC[N:N+4]}, _mm_mul_ps(vx${ABC[N:N+4]}, vslope));
    56  …and_ps(vacc${ABC[N:N+4]}, vmask${ABC[N:N+4]}), _mm_andnot_ps(vmask${ABC[N:N+4]}, vx${ABC[N:N+4]}));
    [all …]

scalar.c.in
    28  const float vx${ABC[N]} = x[${N}];
    32  float vacc${ABC[N]} = vx${ABC[N]} * vslope;
    35  vacc${ABC[N]} = XNN_UNPREDICTABLE(vx${ABC[N]} < 0.0f) ? vacc${ABC[N]} : vx${ABC[N]};
    44  const float vx = *x++;
    45  float vacc = vx * vslope;
    46  vacc = XNN_UNPREDICTABLE(vx < 0.0f) ? vacc : vx;
    51  const float vx = *x;
    52  float vacc = vx * vslope;
    53  vacc = XNN_UNPREDICTABLE(vx < 0.0f) ? vacc : vx;
    58  const float vx = *x++;
    [all …]

wasmsimd-minmax.c.in
    30  v128_t vx${ABC[0:4]} = wasm_v128_load(x);
    32  v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
    36  v128_t vacc${ABC[N:N+4]} = wasm_i32x4_max(vx${ABC[N:N+4]}, vzero);
    37  vx${ABC[N:N+4]} = wasm_i32x4_min(vx${ABC[N:N+4]}, vzero);
    40  vacc${ABC[N:N+4]} = wasm_f32x4_add(vacc${ABC[N:N+4]}, wasm_f32x4_mul(vx${ABC[N:N+4]}, vslope));
    48  v128_t vx = wasm_v128_load(x);
    50  v128_t vacc = wasm_i32x4_max(vx, vzero);
    51  vx = wasm_i32x4_min(vx, vzero);
    52  vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vx, vslope));
    57  v128_t vx = wasm_v128_load(x);
    [all …]

wasmsimd-bitselect.c.in
    30  const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
    32  const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
    36  v128_t vacc${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vslope);
    37  const v128_t vmask${ABC[N:N+4]} = wasm_i32x4_lt(vx${ABC[N:N+4]}, vzero);
    40  vacc${ABC[N:N+4]} = wasm_v128_bitselect(vacc${ABC[N:N+4]}, vx${ABC[N:N+4]}, vmask${ABC[N:N+4]});
    48  const v128_t vx = wasm_v128_load(x);
    50  v128_t vacc = wasm_f32x4_mul(vx, vslope);
    51  const v128_t vmask = wasm_i32x4_lt(vx, vzero);
    52  vacc = wasm_v128_bitselect(vacc, vx, vmask);
    57  const v128_t vx = wasm_v128_load(x);
    [all …]

neon.c.in
    30  const float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;
    33  float32x4_t vacc${ABC[N:N+4]} = vmulq_f32(vx${ABC[N:N+4]}, vslope);
    34  const uint32x4_t vmask${ABC[N:N+4]} = vcltq_s32(vreinterpretq_s32_f32(vx${ABC[N:N+4]}), vmovq_n_s3…
    37  vacc${ABC[N:N+4]} = vbslq_f32(vmask${ABC[N:N+4]}, vacc${ABC[N:N+4]}, vx${ABC[N:N+4]});
    44  const float32x4_t vx = vld1q_f32(x); x += 4;
    45  float32x4_t vacc = vmulq_f32(vx, vslope);
    46  const uint32x4_t vmask = vcltq_s32(vreinterpretq_s32_f32(vx), vmovq_n_s32(0));
    47  vacc = vbslq_f32(vmask, vacc, vx);
    51  const float32x4_t vx = vld1q_f32(x);
    52  float32x4_t vacc = vmulq_f32(vx, vslope);
    [all …]

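Note: all of these templates compute LeakyReLU, y = x for x >= 0 and y = slope * x for x < 0, without branches. Two strategies appear above. The minmax form (sse, wasmsimd-minmax) splits x into max(x, 0) and min(x, 0) and scales only the negative half; the wasm variant even does the min/max with integer ops on the raw bits, which is safe against zero because any negative float compares below +0 as a signed integer. The select form (scalar, wasmsimd-bitselect, neon) computes x * slope unconditionally and picks x or x * slope by the sign of x; the SSE variant derives that sign mask with the integer compare _mm_cmpgt_epi32(0, x). A scalar C restatement of both forms, for reference only (function names are mine, not XNNPACK's):

    #include <stddef.h>

    /* Minmax form: y = max(x, 0) + slope * min(x, 0). */
    static void leaky_relu_minmax(const float* x, float* y, size_t n, float slope) {
      for (size_t i = 0; i < n; i++) {
        const float pos = x[i] > 0.0f ? x[i] : 0.0f;  /* max(x, 0) */
        const float neg = x[i] < 0.0f ? x[i] : 0.0f;  /* min(x, 0) */
        y[i] = pos + neg * slope;
      }
    }

    /* Select form: compute x * slope unconditionally, keep it only for x < 0. */
    static void leaky_relu_select(const float* x, float* y, size_t n, float slope) {
      for (size_t i = 0; i < n; i++) {
        const float scaled = x[i] * slope;
        y[i] = x[i] < 0.0f ? scaled : x[i];
      }
    }

Both forms produce identical results; the minmax form trades the mask and blend for a min, a max, and an accumulate that vectorizes as a multiply-add.
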
/external/XNNPACK/src/f32-vlrelu/gen/
vlrelu-wasmsimd-minmax-x4.c  (all in xnn_f32_vlrelu_ukernel__wasmsimd_minmax_x4)
    30  v128_t vx = wasm_v128_load(x);
    32  v128_t vacc = wasm_i32x4_max(vx, vzero);
    33  vx = wasm_i32x4_min(vx, vzero);
    34  vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vx, vslope));
    39  v128_t vx = wasm_v128_load(x);
    40  v128_t vacc = wasm_i32x4_max(vx, vzero);
    41  vx = wasm_i32x4_min(vx, vzero);
    42  vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vx, vslope));

vlrelu-wasmsimd-minmax-x8.c  (all in xnn_f32_vlrelu_ukernel__wasmsimd_minmax_x8)
    47  v128_t vx = wasm_v128_load(x);
    49  v128_t vacc = wasm_i32x4_max(vx, vzero);
    50  vx = wasm_i32x4_min(vx, vzero);
    51  vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vx, vslope));
    56  v128_t vx = wasm_v128_load(x);
    57  v128_t vacc = wasm_i32x4_max(vx, vzero);
    58  vx = wasm_i32x4_min(vx, vzero);
    59  vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vx, vslope));

vlrelu-sse-x8.c  (all in xnn_f32_vlrelu_ukernel__sse_x8)
    47  __m128 vx = _mm_loadu_ps(x);
    50  __m128 vacc = _mm_max_ps(_mm_setzero_ps(), vx);
    51  vx = _mm_min_ps(vx, vzero);
    52  vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));
    58  __m128 vx = _mm_loadu_ps(x);
    60  __m128 vacc = _mm_max_ps(_mm_setzero_ps(), vx);
    61  vx = _mm_min_ps(vx, vzero);
    62  vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));

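Note: the generated variants differ only in unroll factor. As the x8 files above show, each kernel runs a main loop over two four-lane vectors per iteration, then a single-vector loop, then a tail for the last one to three floats. A sketch of that structure around the SSE minmax arithmetic, with a simplified signature (element count and bare slope; the real ukernels take a byte count and a params struct):

    #include <stddef.h>
    #include <xmmintrin.h>  /* SSE */

    void vlrelu_sse_x8_sketch(size_t n, const float* x, float* y, float slope) {
      const __m128 vslope = _mm_set1_ps(slope);
      const __m128 vzero = _mm_setzero_ps();
      for (; n >= 8; n -= 8) {          /* main loop: two vectors per pass */
        __m128 vx0 = _mm_loadu_ps(x);
        __m128 vx1 = _mm_loadu_ps(x + 4);
        __m128 vacc0 = _mm_max_ps(vzero, vx0);
        __m128 vacc1 = _mm_max_ps(vzero, vx1);
        vx0 = _mm_min_ps(vx0, vzero);
        vx1 = _mm_min_ps(vx1, vzero);
        vacc0 = _mm_add_ps(vacc0, _mm_mul_ps(vx0, vslope));
        vacc1 = _mm_add_ps(vacc1, _mm_mul_ps(vx1, vslope));
        _mm_storeu_ps(y, vacc0);
        _mm_storeu_ps(y + 4, vacc1);
        x += 8; y += 8;
      }
      for (; n >= 4; n -= 4) {          /* leftover full vector */
        __m128 vx = _mm_loadu_ps(x);
        __m128 vacc = _mm_max_ps(vzero, vx);
        vx = _mm_min_ps(vx, vzero);
        vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));
        _mm_storeu_ps(y, vacc);
        x += 4; y += 4;
      }
      for (; n != 0; n -= 1) {          /* scalar tail; the real kernels use partial vector stores */
        const float vx = *x++;
        *y++ = vx < 0.0f ? vx * slope : vx;
      }
    }
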
/external/XNNPACK/src/f32-hswish/
wasmsimd.c.in
    33  v128_t vx${ABC[0:4]} = wasm_v128_load(x);
    35  v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
    39  v128_t vacc${ABC[N:N+4]} = wasm_f32x4_add(vx${ABC[N:N+4]}, vthree);
    40  vx${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vsixth);
    49  vacc${ABC[N:N+4]} = wasm_f32x4_mul(vacc${ABC[N:N+4]}, vx${ABC[N:N+4]});
    57  v128_t vx = wasm_v128_load(x);
    60  v128_t vacc = wasm_f32x4_add(vx, vthree);
    61  vx = wasm_f32x4_mul(vx, vsixth);
    64  vacc = wasm_f32x4_mul(vacc, vx);
    70  v128_t vx = wasm_v128_load(x);
    [all …]

scalar.c.in
    36  float vx${ABC[N]} = x[${N}];
    40  float vacc${ABC[N]} = vx${ABC[N]} + vthree;
    41  vx${ABC[N]} *= vsixth;
    50  vacc${ABC[N]} *= vx${ABC[N]};
    59  float vx = *x++;
    60  float vacc = vx + vthree;
    61  vx *= vsixth;
    64  vacc *= vx;
    69  float vx = *x;
    70  float vacc = vx + vthree;
    [all …]

neon.c.in
    34  float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;
    37  float32x4_t vacc${ABC[N:N+4]} = vaddq_f32(vx${ABC[N:N+4]}, vthree);
    38  vx${ABC[N:N+4]} = vmulq_f32(vx${ABC[N:N+4]}, vsixth);
    47  vacc${ABC[N:N+4]} = vmulq_f32(vacc${ABC[N:N+4]}, vx${ABC[N:N+4]});
    53  float32x4_t vx = vld1q_f32(x); x += 4;
    54  float32x4_t vacc = vaddq_f32(vx, vthree);
    55  vx = vmulq_f32(vx, vsixth);
    58  vacc = vmulq_f32(vacc, vx);
    62  float32x4_t vx = vld1q_f32(x);
    63  float32x4_t vacc = vaddq_f32(vx, vthree);
    [all …]

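Note: these kernels evaluate hardswish, y = x * min(max(x + 3, 0), 6) / 6. The listing shows two of the stages, vacc = x + 3 and vx = x * (1/6); the clamp of vacc to [0, 6] falls inside the elided [all …] portions, and the final multiply fuses the gate and the 1/6 scale. A scalar restatement under that reading:

    /* Hardswish, staged the way the kernels above stage it. */
    static float hswish_scalar(float x) {
      float vacc = x + 3.0f;
      x *= (1.0f / 6.0f);
      vacc = vacc < 0.0f ? 0.0f : vacc;  /* max(vacc, 0) */
      vacc = vacc > 6.0f ? 6.0f : vacc;  /* min(vacc, 6) */
      return vacc * x;                   /* x * relu6(x + 3) / 6 */
    }

Computing x/6 early keeps it off the clamp's dependency chain, so the two halves can execute in parallel.
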
/external/angle/src/tests/compiler_tests/
ShaderVariable_test.cpp  (all in TEST())
    184  ShaderVariable vx;
    185  vx.type = GL_FLOAT;
    186  vx.precision = GL_MEDIUM_FLOAT;
    187  vx.name = "vary";
    188  vx.mappedName = "m_vary";
    189  vx.staticUse = true;
    190  vx.isInvariant = true;
    202  EXPECT_FALSE(vx.isSameVaryingAtLinkTime(fx));
    203  EXPECT_FALSE(vx.isSameVaryingAtLinkTime(fx, 100));
    205  EXPECT_TRUE(vx.isSameVaryingAtLinkTime(fx, 300));
    [all …]

/external/XNNPACK/src/f32-hswish/gen/
hswish-wasmsimd-x4.c  (all in xnn_f32_hswish_ukernel__wasmsimd_x4)
    33  v128_t vx = wasm_v128_load(x);
    36  v128_t vacc = wasm_f32x4_add(vx, vthree);
    37  vx = wasm_f32x4_mul(vx, vsixth);
    40  vacc = wasm_f32x4_mul(vacc, vx);
    46  v128_t vx = wasm_v128_load(x);
    48  v128_t vacc = wasm_f32x4_add(vx, vthree);
    49  vx = wasm_f32x4_mul(vx, vsixth);
    52  vacc = wasm_f32x4_mul(vacc, vx);

hswish-neon-x4.c  (all in xnn_f32_hswish_ukernel__neon_x4)
    33  float32x4_t vx = vld1q_f32(x); x += 4;
    34  float32x4_t vacc = vaddq_f32(vx, vthree);
    35  vx = vmulq_f32(vx, vsixth);
    38  vacc = vmulq_f32(vacc, vx);
    42  float32x4_t vx = vld1q_f32(x);
    43  float32x4_t vacc = vaddq_f32(vx, vthree);
    44  vx = vmulq_f32(vx, vsixth);
    47  vacc = vmulq_f32(vacc, vx);

hswish-wasmsimd-x8.c  (all in xnn_f32_hswish_ukernel__wasmsimd_x8)
    56  v128_t vx = wasm_v128_load(x);
    59  v128_t vacc = wasm_f32x4_add(vx, vthree);
    60  vx = wasm_f32x4_mul(vx, vsixth);
    63  vacc = wasm_f32x4_mul(vacc, vx);
    69  v128_t vx = wasm_v128_load(x);
    71  v128_t vacc = wasm_f32x4_add(vx, vthree);
    72  vx = wasm_f32x4_mul(vx, vsixth);
    75  vacc = wasm_f32x4_mul(vacc, vx);

/external/XNNPACK/src/f32-vrnd/gen/
vrndd-wasmsimd-cvt-x4.c  (all in xnn_f32_vrndd_ukernel__wasmsimd_cvt_x4)
    32  const v128_t vx = wasm_v128_load(x);
    35  const v128_t vintx = wasm_i32x4_trunc_saturate_f32x4(vx);
    36  const v128_t vabsx = wasm_f32x4_abs(vx);
    39  const v128_t vrndx = wasm_v128_bitselect(vprerndx, vx, vrndmask);
    40  const v128_t vadj = wasm_v128_and(wasm_f32x4_lt(vx, vrndx), vone);
    47  const v128_t vx = wasm_v128_load(x);
    49  const v128_t vintx = wasm_i32x4_trunc_saturate_f32x4(vx);
    50  const v128_t vabsx = wasm_f32x4_abs(vx);
    53  const v128_t vrndx = wasm_v128_bitselect(vprerndx, vx, vrndmask);
    54  const v128_t vadj = wasm_v128_and(wasm_f32x4_lt(vx, vrndx), vone);

vrndu-wasmsimd-cvt-x4.c  (all in xnn_f32_vrndu_ukernel__wasmsimd_cvt_x4)
    32  const v128_t vx = wasm_v128_load(x);
    35  const v128_t vintx = wasm_i32x4_trunc_saturate_f32x4(vx);
    36  const v128_t vabsx = wasm_f32x4_abs(vx);
    39  const v128_t vrndx = wasm_v128_bitselect(vprerndx, vx, vrndmask);
    40  const v128_t vadjmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vx, vrndx));
    48  const v128_t vx = wasm_v128_load(x);
    50  const v128_t vintx = wasm_i32x4_trunc_saturate_f32x4(vx);
    51  const v128_t vabsx = wasm_f32x4_abs(vx);
    54  const v128_t vrndx = wasm_v128_bitselect(vprerndx, vx, vrndmask);
    55  const v128_t vadjmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vx, vrndx));

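Note: both rounding kernels share one trick. wasm_i32x4_trunc_saturate_f32x4 truncates toward zero, a convert back to float (the vprerndx used in the bitselect) gives the truncated value, and a compare against that result drives a +/-1 adjustment: subtract one for floor (vrndd) when truncation rounded up, add one for ceil (vrndu) when it rounded down. vrndmask passes through inputs whose magnitude is too large to have fractional bits; its computation is elided from the listing, so the threshold in this scalar model of the floor case is my assumption:

    #include <math.h>
    #include <stdint.h>

    static float floor_via_cvt(float x) {
      /* Floats with |x| >= 2^23 carry no fractional bits, so they are already
         integral; passing them through also covers NaN and values that would
         overflow int32. */
      if (!(fabsf(x) < 0x1.0p+23f)) {
        return x;
      }
      const float rndx = (float)(int32_t)x;  /* truncate toward zero, convert back */
      return x < rndx ? rndx - 1.0f : rndx;  /* step down when truncation rounded up */
    }
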
/external/llvm-project/libcxx/test/libcxx/atomics/
diagnose_invalid_memory_order.verify.cpp  (all in main())
    24  volatile std::atomic<int>& vx = x;
    31  vx.load(std::memory_order_release); // expected-warning {{memory order argument to atomic operation is invalid}}
    32  vx.load(std::memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
    42  std::atomic_load_explicit(&vx, std::memory_order_release); // expected-warning {{memory order argument to atomic operation is invalid}}
    43  std::atomic_load_explicit(&vx, std::memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
    55  vx.store(42, std::memory_order_consume); // expected-warning {{memory order argument to atomic operation is invalid}}
    56  vx.store(42, std::memory_order_acquire); // expected-warning {{memory order argument to atomic operation is invalid}}
    57  vx.store(42, std::memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
    67  std::atomic_store_explicit(&vx, 42, std::memory_order_consume); // expected-warning {{memory order argument to atomic operation is invalid}}
    68  std::atomic_store_explicit(&vx, 42, std::memory_order_acquire); // expected-warning {{memory order argument to atomic operation is invalid}}
    [all …]

/external/libcxx/test/libcxx/atomics/
diagnose_invalid_memory_order.fail.cpp  (older copy of the same test; all in main())
    25  volatile std::atomic<int>& vx = x;
    32  vx.load(std::memory_order_release); // expected-warning {{memory order argument to atomic operation is invalid}}
    33  vx.load(std::memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
    43  std::atomic_load_explicit(&vx, std::memory_order_release); // expected-warning {{memory order argument to atomic operation is invalid}}
    44  std::atomic_load_explicit(&vx, std::memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
    56  vx.store(42, std::memory_order_consume); // expected-warning {{memory order argument to atomic operation is invalid}}
    57  vx.store(42, std::memory_order_acquire); // expected-warning {{memory order argument to atomic operation is invalid}}
    58  vx.store(42, std::memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
    68  std::atomic_store_explicit(&vx, 42, std::memory_order_consume); // expected-warning {{memory order argument to atomic operation is invalid}}
    69  std::atomic_store_explicit(&vx, 42, std::memory_order_acquire); // expected-warning {{memory order argument to atomic operation is invalid}}
    [all …]

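Note: both copies of this test pin down clang's diagnostic for memory orders the standard forbids for a given operation: loads may not use release or acq_rel, stores may not use consume, acquire, or acq_rel. The same restriction exists in C11, where clang emits the same -Watomic-memory-ordering warning; a minimal C reproducer (mine, not part of the test suite):

    #include <stdatomic.h>

    int main(void) {
      _Atomic int x = 0;
      /* clang warns "memory order argument to atomic operation is invalid"
         for each of the two lines below. */
      (void)atomic_load_explicit(&x, memory_order_release);  /* release is store-only */
      atomic_store_explicit(&x, 42, memory_order_acquire);   /* acquire is load-only */
      return 0;
    }
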
/external/kotlinx.coroutines/js/example-frontend-js/src/
ExampleMain.kt
    in onRect():
      100  var vx = speed
      106  x += vx * dt
      110  vx = -vx
      114  vx = -vx
      128  val t = vx
      130  vx = vy
      133  vx = -vy
    in onCircle():
      155  var vx = sin(initialAngle) * initialSpeed
      164  vx += dx / dn * acceleration * dt
      166  val vn = sqrt(vx * vx + vy * vy)
    [all …]

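Note: this demo animates shapes by integrating a velocity (vx, vy) each frame, negating a component on a wall hit (onRect) and accelerating toward a target with the speed vn = sqrt(vx*vx + vy*vy) used for renormalization (onCircle). A C restatement of the wall-bounce step (the struct and bounds are illustrative, not the Kotlin demo's actual types):

    typedef struct { float x, y, vx, vy; } Body;

    /* Advance one frame: integrate position, reflect velocity at the walls. */
    static void step(Body* b, float dt, float width, float height) {
      b->x += b->vx * dt;
      b->y += b->vy * dt;
      if (b->x < 0.0f || b->x > width)  b->vx = -b->vx;  /* horizontal bounce */
      if (b->y < 0.0f || b->y > height) b->vy = -b->vy;  /* vertical bounce */
    }
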
/external/llvm-project/llvm/test/MC/RISCV/rvv/
macc.s
    23  vmacc.vx v8, a0, v4, v0.t
    24  # CHECK-INST: vmacc.vx v8, a0, v4, v0.t
    29  vmacc.vx v8, a0, v4
    30  # CHECK-INST: vmacc.vx v8, a0, v4
    47  vnmsac.vx v8, a0, v4, v0.t
    48  # CHECK-INST: vnmsac.vx v8, a0, v4, v0.t
    53  vnmsac.vx v8, a0, v4
    54  # CHECK-INST: vnmsac.vx v8, a0, v4
    71  vmadd.vx v8, a0, v4, v0.t
    72  # CHECK-INST: vmadd.vx v8, a0, v4, v0.t
    [all …]

mul.s
    23  vmul.vx v8, v4, a0, v0.t
    24  # CHECK-INST: vmul.vx v8, v4, a0, v0.t
    29  vmul.vx v8, v4, a0
    30  # CHECK-INST: vmul.vx v8, v4, a0
    47  vmulh.vx v8, v4, a0, v0.t
    48  # CHECK-INST: vmulh.vx v8, v4, a0, v0.t
    53  vmulh.vx v8, v4, a0
    54  # CHECK-INST: vmulh.vx v8, v4, a0
    71  vmulhu.vx v8, v4, a0, v0.t
    72  # CHECK-INST: vmulhu.vx v8, v4, a0, v0.t
    [all …]

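Note: these MC tests only check assembly and encodings, but the semantics from the RISC-V V spec are easy to state. The .vx forms take one scalar operand from an x-register (a0 above). vmul.vx writes the low half of the product and vmulh.vx/vmulhu.vx the signed/unsigned high half; the ternary ops accumulate in place, with vmacc (vd[i] = rs1 * vs2[i] + vd[i]) and vnmsac (vd[i] = -(rs1 * vs2[i]) + vd[i]) overwriting the addend, and vmadd (vd[i] = rs1 * vd[i] + vs2[i]) overwriting the multiplicand. A per-element C model of the masked vmacc.vx (element type and VL handling simplified):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* vmacc.vx vd, rs1, vs2, v0.t  =>  vd[i] = rs1 * vs2[i] + vd[i],
       applied only where the mask bit is set (pass mask = NULL for the
       unmasked form). */
    static void vmacc_vx(int32_t* vd, int32_t rs1, const int32_t* vs2,
                         const bool* mask, size_t vl) {
      for (size_t i = 0; i < vl; i++) {
        if (mask == NULL || mask[i]) {
          vd[i] = rs1 * vs2[i] + vd[i];
        }
      }
    }
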
/external/XNNPACK/src/f16-hswish/
neonfp16arith.c.in
    37  float16x8_t vx${ABC[N:N+8]} = vld1q_f16(x); x += 8;
    40  float16x8_t vacc${ABC[N:N+8]} = vaddq_f16(vx${ABC[N:N+8]}, vthree);
    41  vx${ABC[N:N+8]} = vmulq_f16(vx${ABC[N:N+8]}, vsixth);
    50  vacc${ABC[N:N+8]} = vmulq_f16(vacc${ABC[N:N+8]}, vx${ABC[N:N+8]});
    56  float16x8_t vx = vld1q_f16(x); x += 8;
    57  float16x8_t vacc = vaddq_f16(vx, vthree);
    58  vx = vmulq_f16(vx, vsixth);
    61  vacc = vmulq_f16(vacc, vx);
    65  float16x8_t vx = vld1q_f16(x);
    66  float16x8_t vacc = vaddq_f16(vx, vthree);
    [all …]

/external/XNNPACK/src/f16-hswish/gen/
hswish-neonfp16arith-x8.c  (all in xnn_f16_hswish_ukernel__neonfp16arith_x8; the same hardswish staging as the f32 kernels, in half precision on eight lanes)
    36  float16x8_t vx = vld1q_f16(x); x += 8;
    37  float16x8_t vacc = vaddq_f16(vx, vthree);
    38  vx = vmulq_f16(vx, vsixth);
    41  vacc = vmulq_f16(vacc, vx);
    45  float16x8_t vx = vld1q_f16(x);
    46  float16x8_t vacc = vaddq_f16(vx, vthree);
    47  vx = vmulq_f16(vx, vsixth);
    50  vacc = vmulq_f16(vacc, vx);

/external/XNNPACK/src/f32-vrnd/
vrndd-wasmsimd-cvt.c.in  (the template the vrndd-wasmsimd-cvt-x4.c kernel above is generated from)
    32  const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
    34  const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
    38  const v128_t vintx${ABC[N:N+4]} = wasm_i32x4_trunc_saturate_f32x4(vx${ABC[N:N+4]});
    39  const v128_t vabsx${ABC[N:N+4]} = wasm_f32x4_abs(vx${ABC[N:N+4]});
    46  const v128_t vrndx${ABC[N:N+4]} = wasm_v128_bitselect(vprerndx${ABC[N:N+4]}, vx${ABC[N:N+4]}, vrnd…
    49  const v128_t vadj${ABC[N:N+4]} = wasm_v128_and(wasm_f32x4_lt(vx${ABC[N:N+4]}, vrndx${ABC[N:N+4]}),…
    60  const v128_t vx = wasm_v128_load(x);
    63  const v128_t vintx = wasm_i32x4_trunc_saturate_f32x4(vx);
    64  const v128_t vabsx = wasm_f32x4_abs(vx);
    67  const v128_t vrndx = wasm_v128_bitselect(vprerndx, vx, vrndmask);
    [all …]