| /external/XNNPACK/src/f16-vbinary/gen/ |
| D | vmaxc-neonfp16arith-x16.c |
| 38 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmaxc_ukernel__neonfp16arith_x16() | local |
| 50 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmaxc_ukernel__neonfp16arith_x16() | local |
| 56 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmaxc_ukernel__neonfp16arith_x16() | local |
|
| D | vsqrdiffc-neonfp16arith-x16.c |
| 38 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16() | local |
| 52 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16() | local |
| 59 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16() | local |
|
| D | vminc-neonfp16arith-x16.c |
| 38 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vminc_ukernel__neonfp16arith_x16() | local |
| 50 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vminc_ukernel__neonfp16arith_x16() | local |
| 56 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vminc_ukernel__neonfp16arith_x16() | local |
|
| D | vdivc-minmax-neonfp16arith-x16.c |
| 40 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vdivc_minmax_ukernel__neonfp16arith_x16() | local |
| 57 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vdivc_minmax_ukernel__neonfp16arith_x16() | local |
| 65 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vdivc_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vrsubc-minmax-neonfp16arith-x16.c |
| 40 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vrsubc_minmax_ukernel__neonfp16arith_x16() | local |
| 57 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vrsubc_minmax_ukernel__neonfp16arith_x16() | local |
| 65 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vrsubc_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vsubc-minmax-neonfp16arith-x16.c |
| 40 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsubc_minmax_ukernel__neonfp16arith_x16() | local |
| 57 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsubc_minmax_ukernel__neonfp16arith_x16() | local |
| 65 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vsubc_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vmulc-minmax-neonfp16arith-x16.c |
| 40 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x16() | local |
| 57 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x16() | local |
| 65 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vaddc-minmax-neonfp16arith-x16.c |
| 40 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vaddc_minmax_ukernel__neonfp16arith_x16() | local |
| 57 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vaddc_minmax_ukernel__neonfp16arith_x16() | local |
| 65 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vaddc_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vrdivc-minmax-neonfp16arith-x16.c |
| 40 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vrdivc_minmax_ukernel__neonfp16arith_x16() | local |
| 57 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vrdivc_minmax_ukernel__neonfp16arith_x16() | local |
| 65 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vrdivc_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vmin-neonfp16arith-x16.c |
| 37 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmin_ukernel__neonfp16arith_x16() | local |
| 51 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmin_ukernel__neonfp16arith_x16() | local |
| 58 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmin_ukernel__neonfp16arith_x16() | local |
|
| D | vsqrdiff-neonfp16arith-x16.c |
| 37 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16() | local |
| 53 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16() | local |
| 61 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16() | local |
|
| D | vmax-neonfp16arith-x16.c |
| 37 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmax_ukernel__neonfp16arith_x16() | local |
| 51 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmax_ukernel__neonfp16arith_x16() | local |
| 58 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmax_ukernel__neonfp16arith_x16() | local |
|
| D | vadd-minmax-neonfp16arith-x16.c |
| 39 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vadd_minmax_ukernel__neonfp16arith_x16() | local |
| 58 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vadd_minmax_ukernel__neonfp16arith_x16() | local |
| 67 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vadd_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vmul-minmax-neonfp16arith-x16.c |
| 39 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmul_minmax_ukernel__neonfp16arith_x16() | local |
| 58 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmul_minmax_ukernel__neonfp16arith_x16() | local |
| 67 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmul_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vsub-minmax-neonfp16arith-x16.c |
| 39 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsub_minmax_ukernel__neonfp16arith_x16() | local |
| 58 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsub_minmax_ukernel__neonfp16arith_x16() | local |
| 67 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vsub_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vdiv-minmax-neonfp16arith-x16.c |
| 39 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vdiv_minmax_ukernel__neonfp16arith_x16() | local |
| 58 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vdiv_minmax_ukernel__neonfp16arith_x16() | local |
| 67 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vdiv_minmax_ukernel__neonfp16arith_x16() | local |
|
| D | vminc-neonfp16arith-x8.c |
| 38 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vminc_ukernel__neonfp16arith_x8() | local |
| 44 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vminc_ukernel__neonfp16arith_x8() | local |
|
| D | vmaxc-neonfp16arith-x8.c |
| 38 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmaxc_ukernel__neonfp16arith_x8() | local |
| 44 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmaxc_ukernel__neonfp16arith_x8() | local |
|
| D | vsqrdiffc-neonfp16arith-x8.c |
| 38 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8() | local |
| 45 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8() | local |
|
| D | vsqrdiff-neonfp16arith-x8.c |
| 37 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8() | local |
| 45 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8() | local |
|
| D | vmax-neonfp16arith-x8.c |
| 37 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmax_ukernel__neonfp16arith_x8() | local |
| 44 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmax_ukernel__neonfp16arith_x8() | local |
|
| D | vmin-neonfp16arith-x8.c |
| 37 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmin_ukernel__neonfp16arith_x8() | local |
| 44 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmin_ukernel__neonfp16arith_x8() | local |
|
| D | vmulc-minmax-neonfp16arith-x8.c |
| 40 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | in xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x8() | local |
| 48 | const float16x8_t va01234567 = vld1q_f16(a); | in xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x8() | local |
|
| /external/XNNPACK/src/f16-vbinary/ |
| D | vopc-neonfp16arith.c.in |
| 77 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | variable |
| 88 | const float16x8_t va01234567 = vld1q_f16(a); | variable |
|
| D | vop-neonfp16arith.c.in |
| 75 | const float16x8_t va01234567 = vld1q_f16(a); a += 8; | variable |
| 87 | const float16x8_t va01234567 = vld1q_f16(a); | variable |
|
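Every match above is the same idiom: the microkernel loads eight half-precision elements into a `float16x8_t` with `vld1q_f16(a)` and advances the pointer, and each file ends with one un-advanced load that feeds the remainder (tail) path. The sketch below only illustrates that shape; it is not the XNNPACK source. The function name `f16_vmax_example` is invented, `vmaxq_f16` stands in for whichever binary op a given kernel uses, and the code assumes a toolchain targeting ARMv8.2-A with FP16 arithmetic (e.g. `-march=armv8.2-a+fp16`) and inputs padded so the full-vector tail load stays in bounds.

```c
#include <arm_neon.h>
#include <stddef.h>

// Hypothetical sketch of the load/compute/store pattern shared by the matched
// kernels; vmaxq_f16 is a stand-in binary op. Not the XNNPACK code itself.
void f16_vmax_example(size_t n, const float16_t* a, const float16_t* b, float16_t* y) {
  // Main loop: load eight fp16 lanes from each input and advance the pointers
  // (the "vld1q_f16(a); a += 8;" lines matched above).
  for (; n >= 8; n -= 8) {
    const float16x8_t va01234567 = vld1q_f16(a); a += 8;
    const float16x8_t vb01234567 = vld1q_f16(b); b += 8;
    const float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
    vst1q_f16(y, vy01234567); y += 8;
  }
  if (n != 0) {
    // Tail: one final load without advancing the pointer (the last match in
    // each file above). Assumes the inputs remain readable for a full vector.
    const float16x8_t va01234567 = vld1q_f16(a);
    const float16x8_t vb01234567 = vld1q_f16(b);
    const float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
    float16_t tmp[8];
    vst1q_f16(tmp, vy01234567);
    for (size_t i = 0; i < n; i++) {
      y[i] = tmp[i];  // write back only the valid remainder lanes
    }
  }
}
```

The -x16 files show three matches each (main loop, an eight-element remainder block, then the sub-eight tail) while the -x8 files show two, which is consistent with the x16 kernels being a more heavily unrolled variant generated from the same vop/vopc templates listed above.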