| /external/XNNPACK/src/s16-vlshift/gen/ |
| D | scalar-x3.c | 37 const uint16_t vout2 = vi2 << shift; in xnn_s16_vlshift_ukernel__scalar_x3() local
|
| D | scalar-x4.c | 38 const uint16_t vout2 = vi2 << shift; in xnn_s16_vlshift_ukernel__scalar_x4() local
|
| D | neon-x24.c | 40 const int16x8_t vout2 = vshlq_s16(vi2, vshift); in xnn_s16_vlshift_ukernel__neon_x24() local
|
| D | neon-x32.c | 41 const int16x8_t vout2 = vshlq_s16(vi2, vshift); in xnn_s16_vlshift_ukernel__neon_x32() local
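The scalar and NEON rows above implement the same element-wise left shift, one element at a time in the scalar files and 24 or 32 int16 lanes per unrolled step in the NEON files (vout2 is the third int16x8 register of that block). A minimal scalar sketch of the core, assuming the int16 data is handled as uint16_t so the shift is well defined (names and the signature are illustrative, not the XNNPACK ones):

    #include <stddef.h>
    #include <stdint.h>

    // Shift every element left by `shift` bits; the bit pattern is kept in
    // uint16_t so the left shift cannot run into signed-overflow behavior.
    static void s16_vlshift_scalar(size_t batch, const uint16_t* input,
                                   uint16_t* output, uint32_t shift) {
      for (size_t i = 0; i < batch; i++) {
        const uint16_t vi = input[i];
        const uint16_t vout = vi << shift;  // same form as "vout2 = vi2 << shift" above
        output[i] = vout;
      }
    }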
|
| /external/XNNPACK/src/qs8-vcvt/gen/ |
| D | vcvt-scalar-x4.c | 38 int32_t vout2 = math_asr_s32(vacc2, 8); in xnn_qs8_vcvt_ukernel__scalar_x4() local
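Here vacc2 is a fixed-point accumulator with 8 fraction bits (hence the shift by 8): the arithmetic shift right drops the fraction before the result is clamped to the quantized output range. A self-contained sketch of that step, with the asr helper and parameter handling as simplified stand-ins rather than the library's exact code; the qu8-vcvt rows further down apply the same shift on the unsigned type:

    #include <stdint.h>

    // Portable arithmetic shift right, standing in for XNNPACK's math_asr_s32().
    static inline int32_t asr_s32(int32_t x, uint32_t n) {
      return x >= 0 ? x >> n : ~(~x >> n);
    }

    // Requantize one accumulator carrying 8 fraction bits, then clamp to int8.
    static inline int8_t qs8_requantize_q8(int32_t vacc, int32_t output_zero_point) {
      int32_t vout = asr_s32(vacc, 8);   // mirrors "vout2 = math_asr_s32(vacc2, 8)"
      vout += output_zero_point;
      if (vout < INT8_MIN) vout = INT8_MIN;
      if (vout > INT8_MAX) vout = INT8_MAX;
      return (int8_t) vout;
    }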
|
| /external/XNNPACK/src/cs16-bfly4/ |
| D | samples1-neon.c | 38 const int16x4_t vout2 = vsub_s16(vout0, vtmp3hi); in xnn_cs16_bfly4_samples1_ukernel__neon() local
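This row comes from the radix-4 FFT butterfly over interleaved int16 complex samples, in the samples==1 special case where the twiddles reduce to constants and the kernel is mostly adds and subtracts (vout2 is one of those subtractions). A plain-C sketch of the add/subtract structure of one such butterfly, without the saturation and fixed-point scaling details of the int16 NEON kernel (types and names are illustrative):

    #include <stdint.h>

    typedef struct { int32_t r, i; } cplx32;

    // One radix-4 butterfly with unit twiddles:
    //   out0 = a+b+c+d
    //   out1 = (a-c) - j*(b-d)
    //   out2 = (a+c) - (b+d)
    //   out3 = (a-c) + j*(b-d)
    static void bfly4_unit_twiddle(const cplx32 in[4], cplx32 out[4]) {
      const cplx32 t0 = { in[0].r + in[2].r, in[0].i + in[2].i };  // a + c
      const cplx32 t1 = { in[0].r - in[2].r, in[0].i - in[2].i };  // a - c
      const cplx32 t2 = { in[1].r + in[3].r, in[1].i + in[3].i };  // b + d
      const cplx32 t3 = { in[1].r - in[3].r, in[1].i - in[3].i };  // b - d

      out[0].r = t0.r + t2.r;  out[0].i = t0.i + t2.i;
      out[2].r = t0.r - t2.r;  out[2].i = t0.i - t2.i;  // a subtraction like vout2 above
      out[1].r = t1.r + t3.i;  out[1].i = t1.i - t3.r;  // t1 - j*t3
      out[3].r = t1.r - t3.i;  out[3].i = t1.i + t3.r;  // t1 + j*t3
    }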
|
| /external/XNNPACK/src/qu8-vcvt/gen/ |
| D | vcvt-scalar-x4.c | 38 int32_t vout2 = math_asr_s32(vacc2, 8); in xnn_qu8_vcvt_ukernel__scalar_x4() local
|
| /external/XNNPACK/src/s16-window/gen/ |
| D | scalar-x3.c | 49 int32_t vout2 = (int32_t) vi2 * (int32_t) w2; in xnn_s16_window_ukernel__scalar_x3() local
|
| D | scalar-x4.c | 51 int32_t vout2 = (int32_t) vi2 * (int32_t) w2; in xnn_s16_window_ukernel__scalar_x4() local
|
| D | neon-shift15-x24.c | 50 const int16x8_t vout2 = vqdmulhq_s16(vi2, vw2); in xnn_s16_window_shift15_ukernel__neon_x24() local
|
| D | neon-shift15-x32.c | 52 const int16x8_t vout2 = vqdmulhq_s16(vi2, vw2); in xnn_s16_window_shift15_ukernel__neon_x32() local
|
| D | neon-x24.c | 65 const int16x8_t vout2 = vcombine_s16(vqmovn_s32(vacc2_lo), vqmovn_s32(vacc2_hi)); in xnn_s16_window_ukernel__neon_x24() local
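All the window kernels multiply each int16 input by an int16 window weight and narrow back to int16: the scalar rows widen to 32 bits, shift right, and saturate; the "shift15" NEON rows fold a fixed shift of 15 into vqdmulhq_s16 (saturating (a*b) >> 15); the plain NEON row narrows the 32-bit products with vqmovn_s32. A scalar sketch of that core, assuming a flat batch and illustrative names rather than the real signature:

    #include <stddef.h>
    #include <stdint.h>

    // Apply an int16 window to an int16 signal: widen, multiply, arithmetic
    // shift right, and saturate back to int16.
    static void s16_window_scalar(size_t batch, const int16_t* input,
                                  const int16_t* weights, int16_t* output,
                                  uint32_t shift) {
      for (size_t i = 0; i < batch; i++) {
        int32_t vout = (int32_t) input[i] * (int32_t) weights[i];  // as in the scalar rows
        vout = vout >= 0 ? vout >> shift : ~(~vout >> shift);      // arithmetic shift right
        if (vout < INT16_MIN) vout = INT16_MIN;
        if (vout > INT16_MAX) vout = INT16_MAX;
        output[i] = (int16_t) vout;
      }
    }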
|
| /external/XNNPACK/src/qu8-vlrelu/gen/ |
| D | vlrelu-scalar-select-x4.c | 50 int32_t vout2 = math_asr_s32(vacc2, 8); in xnn_qu8_vlrelu_ukernel__scalar_select_x4() local
|
| D | vlrelu-scalar-andxor-x4.c | 60 int32_t vout2 = math_asr_s32(vacc2, 8); in xnn_qu8_vlrelu_ukernel__scalar_andxor_x4() local
|
| /external/XNNPACK/src/qs8-vlrelu/gen/ |
| D | vlrelu-scalar-andxor-x4.c | 60 int32_t vout2 = math_asr_s32(vacc2, 8); in xnn_qs8_vlrelu_ukernel__scalar_andxor_x4() local
|
| D | vlrelu-scalar-select-x4.c | 50 int32_t vout2 = math_asr_s32(vacc2, 8); in xnn_qs8_vlrelu_ukernel__scalar_select_x4() local
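The four LeakyReLU rows (qu8 and qs8) share one scheme: center the input on its zero point, multiply by either a positive-slope or a negative-slope fixed-point multiplier, and requantize with an arithmetic shift by 8. The "select" files pick the multiplier with a conditional; the "andxor" files build the same choice branch-free from a sign mask. A sketch of the select flavor for the signed case (parameter names are mine and the bias handling is simplified):

    #include <stdint.h>

    // Quantized LeakyReLU, "select" flavor: choose the slope multiplier from the
    // sign of the centered input, then drop the 8 fraction bits and clamp.
    static inline int8_t qs8_lrelu_select(int8_t x,
                                          int32_t input_zero_point,
                                          int32_t positive_multiplier,
                                          int32_t negative_multiplier,
                                          int32_t bias /* carries the output zero point */) {
      int32_t vacc = (int32_t) x - input_zero_point;
      const int32_t vmultiplier = vacc >= 0 ? positive_multiplier : negative_multiplier;
      vacc = bias + vacc * vmultiplier;
      int32_t vout = vacc >= 0 ? vacc >> 8 : ~(~vacc >> 8);  // math_asr_s32(vacc2, 8)
      if (vout < INT8_MIN) vout = INT8_MIN;
      if (vout > INT8_MAX) vout = INT8_MAX;
      return (int8_t) vout;
    }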
|
| /external/XNNPACK/src/cs16-vsquareabs/gen/ |
| D | scalar-x3.c | 45 const uint32_t vout2 = vrsquare2 + visquare2; in xnn_cs16_vsquareabs_ukernel__scalar_x3() local
|
| D | scalar-x4.c | 49 const uint32_t vout2 = vrsquare2 + visquare2; in xnn_cs16_vsquareabs_ukernel__scalar_x4() local
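Each output here is the squared magnitude of one interleaved int16 complex sample, re*re + im*im, accumulated in 32 bits (each square is at most 2^30, so the sum fits in a uint32_t). A minimal scalar sketch with an illustrative signature:

    #include <stddef.h>
    #include <stdint.h>

    // Squared magnitude of interleaved int16 complex samples.
    static void cs16_vsquareabs_scalar(size_t batch, const int16_t* input,
                                       uint32_t* output) {
      for (size_t i = 0; i < batch; i++) {
        const int32_t vr = input[2 * i];      // real part
        const int32_t vi = input[2 * i + 1];  // imaginary part
        const uint32_t vrsquare = (uint32_t) (vr * vr);
        const uint32_t visquare = (uint32_t) (vi * vi);
        output[i] = vrsquare + visquare;      // as in "vout2 = vrsquare2 + visquare2"
      }
    }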
|
| /external/XNNPACK/src/f32-spmm/gen/ |
| D | 8x1-minmax-scalar-pipelined.c | 86 float vout2 = math_min_f32(vacc2, vmax); in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local 152 float vout2 = math_min_f32(vacc2, vmax); in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
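Both hits are the output clamp of the sparse matrix-dense matrix multiply: each accumulator goes through min with the fused upper bound and then max with the lower bound before being stored. A sketch of that step after a generic gather-style sparse dot product; the real kernel walks XNNPACK's packed sparse-weight layout, which this does not reproduce, and the names are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    // Clamp one accumulator to [vmin, vmax], min first, as in the hits above.
    static inline float clamp_minmax(float vacc, float vmin, float vmax) {
      float vout = vacc < vmax ? vacc : vmax;  // math_min_f32(vacc, vmax)
      vout = vout > vmin ? vout : vmin;        // math_max_f32(vout, vmin)
      return vout;
    }

    // One output element of a sparse dot product in a simple gather form.
    static float spmm_output(size_t nnz, const float* values,
                             const int32_t* input_offsets, const float* input,
                             float bias, float vmin, float vmax) {
      float vacc = bias;
      for (size_t k = 0; k < nnz; k++) {
        vacc += values[k] * input[input_offsets[k]];  // accumulate the nonzero terms
      }
      return clamp_minmax(vacc, vmin, vmax);
    }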
|
| /external/XNNPACK/src/qu8-vaddc/gen/ |
| D | minmax-scalar-x4.c | 45 int32_t vout2 = math_asr_s32(vacc2, vshift); in xnn_qu8_vaddc_minmax_ukernel__scalar_x4() local
|
| /external/XNNPACK/src/qs8-vaddc/gen/ |
| D | minmax-scalar-x4.c | 45 int32_t vout2 = math_asr_s32(vacc2, vshift); in xnn_qs8_vaddc_minmax_ukernel__scalar_x4() local
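The vaddc kernels (and the qs8/qu8 vadd rows further down) requantize the same way: the operands times their fixed-point multipliers are summed into a bias, then the accumulator is arithmetically shifted right by a per-operator vshift before clamping. For the "addc" case the second operand is constant, so its product is folded into the bias outside the loop. A sketch under those assumptions, with parameter names of my own:

    #include <stdint.h>

    // Requantized elementwise add of two int8 values.
    static inline int8_t qs8_add(int8_t a, int8_t b, int32_t bias,
                                 int32_t a_multiplier, int32_t b_multiplier,
                                 uint32_t vshift, int32_t output_zero_point) {
      const int32_t vacc =
          bias + (int32_t) a * a_multiplier + (int32_t) b * b_multiplier;
      int32_t vout = vacc >= 0 ? vacc >> vshift : ~(~vacc >> vshift);  // math_asr_s32(vacc2, vshift)
      vout += output_zero_point;
      if (vout < INT8_MIN) vout = INT8_MIN;
      if (vout > INT8_MAX) vout = INT8_MAX;
      return (int8_t) vout;
    }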
|
| /external/XNNPACK/src/qs8-vmulc/gen/ |
| D | minmax-fp32-scalar-x4.c | 65 const int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point; in xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x4() local
|
| /external/XNNPACK/src/qu8-vmulc/gen/ |
| D | minmax-fp32-scalar-x4.c | 65 const int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point; in xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x4() local
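The fp32 vmulc kernels requantize through floats: the integer product is scaled by a float multiplier, then converted back with the magic-bias trick, where adding 2^23 + 2^22 (12582912.0f) rounds the value and parks it in the low mantissa bits, so the raw bits minus a precomputed constant yield the quantized output with its zero point already applied. A sketch of that conversion step; the helper and parameter names are mine and the clamping arrangement is simplified:

    #include <stdint.h>
    #include <string.h>

    // Reinterpret a float's bit pattern, standing in for float_as_uint32().
    static inline uint32_t fp32_to_bits(float f) {
      uint32_t bits;
      memcpy(&bits, &f, sizeof bits);
      return bits;
    }

    // Convert a scaled float accumulator to int8 with the magic-bias trick.
    static inline int8_t qs8_requantize_magic(float vfpacc, int32_t output_zero_point) {
      const float vmagic_bias = 12582912.0f;  // 0x1.8p+23 = 2^23 + 2^22
      const int32_t vmagic_bias_less_output_zero_point =
          (int32_t) fp32_to_bits(vmagic_bias) - output_zero_point;

      // Clamp in float space so the value stays where the bit trick is exact.
      const float vmin = (float) (INT8_MIN - output_zero_point);
      const float vmax = (float) (INT8_MAX - output_zero_point);
      vfpacc = vfpacc < vmin ? vmin : vfpacc;
      vfpacc = vfpacc > vmax ? vmax : vfpacc;

      // Adding the magic bias rounds vfpacc to the nearest integer inside the
      // mantissa; the raw bits minus the precomputed constant recover that
      // integer with the output zero point added back.
      vfpacc += vmagic_bias;
      const int32_t vout =
          (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
      return (int8_t) vout;
    }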
|
| /external/XNNPACK/src/qu8-vadd/gen/ |
| D | minmax-scalar-x4.c | 55 int32_t vout2 = math_asr_s32(vacc2, vshift); in xnn_qu8_vadd_minmax_ukernel__scalar_x4() local
|
| /external/XNNPACK/src/qs8-vadd/gen/ |
| D | minmax-scalar-x4.c | 55 int32_t vout2 = math_asr_s32(vacc2, vshift); in xnn_qs8_vadd_minmax_ukernel__scalar_x4() local
|