/external/XNNPACK/src/f16-vbinary/gen/
Cross-reference hits for va456789AB; every hit is the definition (D) of a local variable in a generated x16 microkernel:
D | vsqrdiffc-f16c-x16.c | 39 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vsqrdiffc_ukernel__f16c_x16() local
D | vminc-f16c-x16.c | 39 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vminc_ukernel__f16c_x16() local
D | vsqrdiffc-neonfp16arith-x16.c | 39 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16() local
D | vminc-neonfp16arith-x16.c | 39 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vminc_ukernel__neonfp16arith_x16() local
D | vmaxc-f16c-x16.c | 39 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vmaxc_ukernel__f16c_x16() local
D | vmaxc-neonfp16arith-x16.c | 39 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vmaxc_ukernel__neonfp16arith_x16() local
D | vrdivc-minmax-neonfp16arith-x16.c | 41 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vrdivc_minmax_ukernel__neonfp16arith_x16() local
D | vaddc-minmax-neonfp16arith-x16.c | 41 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vaddc_minmax_ukernel__neonfp16arith_x16() local
D | vmulc-minmax-neonfp16arith-x16.c | 41 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x16() local
D | vrdivc-minmax-f16c-x16.c | 41 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vrdivc_minmax_ukernel__f16c_x16() local
D | vrsubc-minmax-f16c-x16.c | 41 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vrsubc_minmax_ukernel__f16c_x16() local
D | vmulc-minmax-f16c-x16.c | 41 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vmulc_minmax_ukernel__f16c_x16() local
D | vaddc-minmax-f16c-x16.c | 41 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vaddc_minmax_ukernel__f16c_x16() local
D | vsubc-minmax-f16c-x16.c | 41 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vsubc_minmax_ukernel__f16c_x16() local
D | vrsubc-minmax-neonfp16arith-x16.c | 41 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vrsubc_minmax_ukernel__neonfp16arith_x16() local
D | vsubc-minmax-neonfp16arith-x16.c | 41 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vsubc_minmax_ukernel__neonfp16arith_x16() local
D | vdivc-minmax-f16c-x16.c | 41 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vdivc_minmax_ukernel__f16c_x16() local
D | vdivc-minmax-neonfp16arith-x16.c | 41 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vdivc_minmax_ukernel__neonfp16arith_x16() local
D | vsqrdiff-neonfp16arith-x16.c | 39 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16() local
D | vmin-neonfp16arith-x16.c | 39 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vmin_ukernel__neonfp16arith_x16() local
D | vmin-f16c-x16.c | 39 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vmin_ukernel__f16c_x16() local
D | vmax-neonfp16arith-x16.c | 39 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vmax_ukernel__neonfp16arith_x16() local
D | vsqrdiff-f16c-x16.c | 39 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vsqrdiff_ukernel__f16c_x16() local
D | vmax-f16c-x16.c | 39 const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8))); in xnn_f16_vmax_ukernel__f16c_x16() local
D | vadd-minmax-neonfp16arith-x16.c | 41 const float16x8_t va456789AB = vld1q_f16(a); a += 8; in xnn_f16_vadd_minmax_ukernel__neonfp16arith_x16() local
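Taken together, the hits show one shared pattern: each x16 microkernel consumes 16 half-precision elements per iteration as two 8-element vectors, and va456789AB is the second of the pair. On F16C targets that second group of eight f16 values is loaded from a + 8 and widened to f32 with _mm256_cvtph_ps, since these kernels do no f16 arithmetic on x86; on NEON targets with FP16 arithmetic it is loaded directly into a float16x8_t with vld1q_f16. The hits at file line 41 all belong to the minmax-clamped kernels, presumably because their clamp-parameter loads shift the loop body down two lines. The sketch below shows this main loop for a plain two-operand add; it is not XNNPACK's actual code. Only the va456789AB loads are taken verbatim from the listing above; the companion names (va01234567, vb01234567, vb456789AB), the function names, and the loop framing are assumptions, and remainder handling plus the minmax clamping are omitted.

#include <stddef.h>
#include <stdint.h>

#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#include <arm_neon.h>

// NEON FP16-arith path: f16 vectors are operated on directly, no widening.
// Hypothetical kernel; only the va456789AB line mirrors the listing.
static void vadd_x16_neonfp16arith(size_t n, const float16_t* a, const float16_t* b, float16_t* y) {
  for (; n >= 16; n -= 16) {
    const float16x8_t va01234567 = vld1q_f16(a); a += 8;  // elements 0..7 (assumed name)
    const float16x8_t va456789AB = vld1q_f16(a); a += 8;  // second load, as in the listing
    const float16x8_t vb01234567 = vld1q_f16(b); b += 8;
    const float16x8_t vb456789AB = vld1q_f16(b); b += 8;
    vst1q_f16(y, vaddq_f16(va01234567, vb01234567)); y += 8;
    vst1q_f16(y, vaddq_f16(va456789AB, vb456789AB)); y += 8;
  }
  // remainder (n % 16 != 0) omitted for brevity
}

#elif defined(__F16C__)
#include <immintrin.h>

// F16C path: each 8-element half is widened f16 -> f32 with _mm256_cvtph_ps,
// computed in f32, then narrowed back with _mm256_cvtps_ph on the store side.
static void vadd_x16_f16c(size_t n, const uint16_t* a, const uint16_t* b, uint16_t* y) {
  for (; n >= 16; n -= 16) {
    const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
    const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));  // as in the listing
    const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
    const __m256 vb456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
    a += 16; b += 16;
    _mm_storeu_si128((__m128i*) y,
                     _mm256_cvtps_ph(_mm256_add_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
    _mm_storeu_si128((__m128i*) (y + 8),
                     _mm256_cvtps_ph(_mm256_add_ps(va456789AB, vb456789AB), _MM_FROUND_TO_NEAREST_INT));
    y += 16;
  }
  // remainder (n % 16 != 0) omitted for brevity
}
#endif

The constant-operand kernels in the listing (vaddc, vminc, vrdivc, and so on) load a the same way but broadcast a single scalar for the second operand instead of loading vb from memory, which is why only the a-side loads appear in their hits.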