Searched refs:vw01234567 (Results 1 – 5 of 5) sorted by relevance

/external/XNNPACK/src/f16-prelu/gen/
neonfp16arith-2x16.c
   48  const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  in xnn_f16_prelu_ukernel__neonfp16arith_2x16() (local)
   56  float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x16()
   60  float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x16()
   76  const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  in xnn_f16_prelu_ukernel__neonfp16arith_2x16() (local)
   83  float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x16()
   85  float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x16()
   95  const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  in xnn_f16_prelu_ukernel__neonfp16arith_2x16() (local)
  102  float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x16()
  104  float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x16()
neonfp16arith-2x8.c
   48  const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  in xnn_f16_prelu_ukernel__neonfp16arith_2x8() (local)
   53  float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x8()
   55  float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x8()
   65  const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  in xnn_f16_prelu_ukernel__neonfp16arith_2x8() (local)
   72  float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x8()
   74  float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);  in xnn_f16_prelu_ukernel__neonfp16arith_2x8()
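All of these f16 hits share one shape: load eight fp16 PReLU weights into vw01234567, then multiply each input row against them. PReLU keeps x itself for non-negative inputs and x * w for negative ones, so the product is only selected where the input is negative. A minimal single-row sketch of that pattern (hypothetical prelu_f16_row helper, not an XNNPACK API; assumes a compiler with __ARM_FEATURE_FP16_VECTOR_ARITHMETIC):

    #include <arm_neon.h>
    #include <stddef.h>

    /* Hypothetical helper (not an XNNPACK API): PReLU over one row,
       y = x >= 0 ? x : x * w, eight fp16 channels per iteration. */
    static void prelu_f16_row(const float16_t* x, const float16_t* w,
                              float16_t* y, size_t channels) {
      for (; channels >= 8; channels -= 8) {
        const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  /* per-channel weights */
        const float16x8_t vx01234567 = vld1q_f16(x); x += 8;
        float16x8_t vacc01234567 = vmulq_f16(vx01234567, vw01234567);  /* x * w */
        /* Keep x where x >= 0; keep x * w where x < 0. */
        const uint16x8_t vm01234567 = vcltq_f16(vx01234567, vmovq_n_f16(0.0f));
        vacc01234567 = vbslq_f16(vm01234567, vacc01234567, vx01234567);
        vst1q_f16(y, vacc01234567); y += 8;
      }
      /* Sub-8-channel remainder omitted in this sketch. */
    }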
/external/XNNPACK/src/f32-prelu/gen/
avx-2x8.c
   49  const __m256 vw01234567 = _mm256_load_ps(w);  in xnn_f32_prelu_ukernel__avx_2x8() (local)
   57  const __m256 vprod0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);  in xnn_f32_prelu_ukernel__avx_2x8()
   58  const __m256 vprod1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);  in xnn_f32_prelu_ukernel__avx_2x8()

avx-2x16.c
   49  const __m256 vw01234567 = _mm256_load_ps(w);  in xnn_f32_prelu_ukernel__avx_2x16() (local)
   60  const __m256 vprod0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);  in xnn_f32_prelu_ukernel__avx_2x16()
   62  const __m256 vprod1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);  in xnn_f32_prelu_ukernel__avx_2x16()
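The f32 AVX hits compute the same per-channel product with _mm256_mul_ps. A minimal sketch of how such a product can become a PReLU result, selecting by the input's sign bit with _mm256_blendv_ps (hypothetical prelu_f32_row helper, not an XNNPACK API; w is assumed 32-byte aligned to match the _mm256_load_ps in the hits):

    #include <immintrin.h>
    #include <stddef.h>

    /* Hypothetical helper (not an XNNPACK API): PReLU over one row,
       eight f32 channels per iteration. Compile with AVX enabled. */
    static void prelu_f32_row(const float* x, const float* w,
                              float* y, size_t channels) {
      for (; channels >= 8; channels -= 8) {
        const __m256 vw01234567 = _mm256_load_ps(w); w += 8;   /* aligned weights */
        const __m256 vx01234567 = _mm256_loadu_ps(x); x += 8;
        const __m256 vprod01234567 = _mm256_mul_ps(vx01234567, vw01234567);
        /* blendv selects by the sign bit of its mask: negative lanes of x
           take x * w, non-negative lanes keep x. */
        const __m256 vy01234567 = _mm256_blendv_ps(vx01234567, vprod01234567, vx01234567);
        _mm256_storeu_ps(y, vy01234567); y += 8;
      }
      /* Sub-8-channel remainder omitted in this sketch. */
    }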
/external/XNNPACK/src/f16-prelu/
neonfp16arith.c.in
   77  const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  (variable)
   84  float16x8_t vacc${M}x01234567 = vmulq_f16(vi${M}x01234567, vw01234567);
   94  const float16x8_t vw01234567 = vld1q_f16(w); w += 8;  (variable)
  101  float16x8_t vacc${M}x01234567 = vmulq_f16(vi${M}x01234567, vw01234567);