/external/XNNPACK/src/s16-window/gen/
  scalar-x4.c  (references in xnn_s16_window_ukernel__scalar_x4)
     52  int32_t vout3 = (int32_t) vi3 * (int32_t) w3;    [local definition]
     57  vout3 = math_asr_s32(vout3, shift);
     62  vout3 = math_max_s32(vout3, INT16_MIN);
     67  vout3 = math_min_s32(vout3, INT16_MAX);
     72  output[3] = (int16_t) vout3;
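The window references above are the complete per-element tail of XNNPACK's int16 windowing kernel: widen to 32 bits, multiply by the window coefficient, arithmetic-shift right, saturate to int16, store. A minimal stand-alone sketch of that sequence, using plain C in place of the math_asr_s32 / math_max_s32 / math_min_s32 helpers (the function name and loop below are illustrative, not the generated kernel):

```c
#include <stddef.h>
#include <stdint.h>

// out[i] = saturate_int16((in[i] * window[i]) >> shift)
static void s16_window_scalar(const int16_t* input, const int16_t* window,
                              int16_t* output, size_t n, uint32_t shift) {
  for (size_t i = 0; i < n; i++) {
    int32_t vout = (int32_t) input[i] * (int32_t) window[i];  // widen before multiplying
    vout >>= shift;  // math_asr_s32 makes this arithmetic shift portable; plain >> is
                     // implementation-defined for negative int32_t values
    if (vout < INT16_MIN) vout = INT16_MIN;  // saturate to the int16 range
    if (vout > INT16_MAX) vout = INT16_MAX;
    output[i] = (int16_t) vout;
  }
}
```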
/external/XNNPACK/src/qs8-vaddc/gen/
  minmax-scalar-x4.c  (references in xnn_qs8_vaddc_minmax_ukernel__scalar_x4)
     46  int32_t vout3 = math_asr_s32(vacc3, vshift);    [local definition]
     51  vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
     56  vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
     61  vout3 += voutput_zero_point;
     66  output[3] = (int8_t) vout3;
/external/XNNPACK/src/qu8-vaddc/gen/
  minmax-scalar-x4.c  (references in xnn_qu8_vaddc_minmax_ukernel__scalar_x4)
     46  int32_t vout3 = math_asr_s32(vacc3, vshift);    [local definition]
     51  vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
     56  vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
     61  vout3 += voutput_zero_point;
     66  output[3] = (uint8_t) vout3;
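Both vaddc entries end in the same requantization tail, and the qs8/qu8 vadd entries further down repeat it verbatim: shift the 32-bit accumulator right, clamp against bounds that already have the output zero point subtracted, add the zero point back, and narrow to 8 bits. A hedged sketch of that tail; the parameter names follow the snippets, but the helper itself is illustrative rather than XNNPACK's exact code:

```c
#include <stdint.h>

// Requantize a 32-bit accumulator to int8, mirroring the minmax scalar tail above.
// voutput_min_less_zero_point / voutput_max_less_zero_point are output_min / output_max
// with the output zero point already subtracted, so the clamp happens before the
// zero point is re-added.
static inline int8_t requantize_tail_s8(int32_t vacc, uint32_t vshift,
                                        int32_t voutput_min_less_zero_point,
                                        int32_t voutput_max_less_zero_point,
                                        int32_t voutput_zero_point) {
  int32_t vout = vacc >> vshift;  // arithmetic shift of the fixed-point accumulator
  if (vout < voutput_min_less_zero_point) vout = voutput_min_less_zero_point;
  if (vout > voutput_max_less_zero_point) vout = voutput_max_less_zero_point;
  vout += voutput_zero_point;
  return (int8_t) vout;  // the qu8 kernels cast to uint8_t instead
}
```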
/external/XNNPACK/src/qs8-vcvt/gen/
  vcvt-scalar-x4.c  (references in xnn_qs8_vcvt_ukernel__scalar_x4)
     39  int32_t vout3 = math_asr_s32(vacc3, 8);    [local definition]
     44  vout3 = math_max_s32(vout3, -128);
     49  vout3 = math_min_s32(vout3, 127);
     54  y[3] = (int8_t) vout3;
/external/XNNPACK/src/qu8-vcvt/gen/
  vcvt-scalar-x4.c  (references in xnn_qu8_vcvt_ukernel__scalar_x4)
     39  int32_t vout3 = math_asr_s32(vacc3, 8);    [local definition]
     44  vout3 = math_max_s32(vout3, 0);
     49  vout3 = math_min_s32(vout3, 255);
     54  y[3] = (uint8_t) vout3;
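The vcvt kernels rescale an already-quantized tensor to new quantization parameters; the references above are the shared tail: drop eight fractional bits with an arithmetic shift, then clamp to the full int8 (qs8) or uint8 (qu8) range. A rough sketch of the whole per-element conversion, assuming a Q*.8 fixed-point multiplier and a bias that folds in both zero points; the multiplier/bias derivation here is an assumption, not taken from the library:

```c
#include <stddef.h>
#include <stdint.h>

// Re-quantize int8 values to a new scale/zero point in Q*.8 fixed point.
// Assumed (illustrative) setup:
//   vmultiplier ~= round(256 * input_scale / output_scale)
//   vbias       ~= (output_zero_point << 8) - input_zero_point * vmultiplier + 128
static void qs8_requantize_scalar(const int8_t* x, int8_t* y, size_t n,
                                  int32_t vmultiplier, int32_t vbias) {
  for (size_t i = 0; i < n; i++) {
    const int32_t vacc = vbias + (int32_t) x[i] * vmultiplier;
    int32_t vout = vacc >> 8;      // arithmetic shift, as math_asr_s32(vacc, 8) above
    if (vout < -128) vout = -128;  // qs8 range; the qu8 variant clamps to [0, 255]
    if (vout > 127) vout = 127;
    y[i] = (int8_t) vout;
  }
}
```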
/external/XNNPACK/src/qu8-vadd/gen/
  minmax-scalar-x4.c  (references in xnn_qu8_vadd_minmax_ukernel__scalar_x4)
     56  int32_t vout3 = math_asr_s32(vacc3, vshift);    [local definition]
     61  vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
     66  vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
     71  vout3 += voutput_zero_point;
     76  output[3] = (uint8_t) vout3;
/external/XNNPACK/src/qs8-vadd/gen/
  minmax-scalar-x4.c  (references in xnn_qs8_vadd_minmax_ukernel__scalar_x4)
     56  int32_t vout3 = math_asr_s32(vacc3, vshift);    [local definition]
     61  vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
     66  vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
     71  vout3 += voutput_zero_point;
     76  output[3] = (int8_t) vout3;
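The vadd entries repeat the vaddc tail shown earlier; the only per-kernel differences are the clamp bounds and the final cast. A small hedged sketch of how bounds of that shape are typically precomputed once per operator (the struct and function below are illustrative, not XNNPACK's params layout):

```c
#include <stdint.h>

// Clamp bounds expressed relative to the output zero point, so the zero point can be
// re-added after clamping, exactly as in the snippets above.
struct add_minmax_bounds {
  int32_t output_min_less_zero_point;  // e.g. -128 - zero_point for int8 output
  int32_t output_max_less_zero_point;  // e.g.  127 - zero_point for int8 output
  int32_t output_zero_point;
};

static struct add_minmax_bounds make_s8_bounds(int8_t output_min, int8_t output_max,
                                               int8_t output_zero_point) {
  struct add_minmax_bounds b;
  b.output_min_less_zero_point = (int32_t) output_min - (int32_t) output_zero_point;
  b.output_max_less_zero_point = (int32_t) output_max - (int32_t) output_zero_point;
  b.output_zero_point = (int32_t) output_zero_point;
  return b;
}
```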
/external/XNNPACK/src/qu8-vlrelu/gen/
  vlrelu-scalar-select-x4.c  (references in xnn_qu8_vlrelu_ukernel__scalar_select_x4)
     51  int32_t vout3 = math_asr_s32(vacc3, 8);    [local definition]
     56  vout3 = math_max_s32(vout3, 0);
     61  vout3 = math_min_s32(vout3, 255);
     66  y[3] = (uint8_t) vout3;
  vlrelu-scalar-andxor-x4.c  (references in xnn_qu8_vlrelu_ukernel__scalar_andxor_x4)
     61  int32_t vout3 = math_asr_s32(vacc3, 8);    [local definition]
     66  vout3 = math_max_s32(vout3, 0);
     71  vout3 = math_min_s32(vout3, 255);
     76  y[3] = (uint8_t) vout3;
/external/XNNPACK/src/qs8-vlrelu/gen/
  vlrelu-scalar-select-x4.c  (references in xnn_qs8_vlrelu_ukernel__scalar_select_x4)
     51  int32_t vout3 = math_asr_s32(vacc3, 8);    [local definition]
     56  vout3 = math_max_s32(vout3, -128);
     61  vout3 = math_min_s32(vout3, 127);
     66  y[3] = (int8_t) vout3;
  vlrelu-scalar-andxor-x4.c  (references in xnn_qs8_vlrelu_ukernel__scalar_andxor_x4)
     61  int32_t vout3 = math_asr_s32(vacc3, 8);    [local definition]
     66  vout3 = math_max_s32(vout3, -128);
     71  vout3 = math_min_s32(vout3, 127);
     76  y[3] = (int8_t) vout3;
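The two leaky-ReLU variants differ only in how the per-element multiplier is chosen: the select kernels pick between the positive-slope and negative-slope multipliers with a conditional, while the andxor kernels make the same choice branchlessly with mask/XOR arithmetic; both then take the >>8 and clamp shown above. A hedged sketch of the select form, assuming Q*.8 fixed-point multipliers and an accumulator centred on the input zero point (this mirrors the structure, not the exact generated code):

```c
#include <stddef.h>
#include <stdint.h>

// Leaky ReLU on int8 data: inputs below the input zero point are scaled by the
// negative-slope multiplier, the rest by the positive-slope multiplier (both Q*.8).
static void qs8_vlrelu_select(const int8_t* x, int8_t* y, size_t n,
                              int32_t input_zero_point,
                              int32_t positive_multiplier,  // ~256 * in_scale / out_scale
                              int32_t negative_multiplier,  // ~256 * alpha * in_scale / out_scale
                              int32_t output_zero_point) {
  const int32_t bias = output_zero_point << 8;  // folded in before the final >>8
  for (size_t i = 0; i < n; i++) {
    const int32_t vx = (int32_t) x[i] - input_zero_point;
    const int32_t vmultiplier =
        (vx < 0) ? negative_multiplier : positive_multiplier;  // the "select"
    int32_t vout = (bias + vx * vmultiplier) >> 8;  // arithmetic shift drops the fraction
    if (vout < -128) vout = -128;
    if (vout > 127) vout = 127;
    y[i] = (int8_t) vout;
  }
}
```

An andxor-style kernel would replace the conditional with something like vmultiplier = positive_multiplier ^ ((positive_multiplier ^ negative_multiplier) & (vx >> 31)), trading the select for bit operations.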
/external/XNNPACK/src/f32-spmm/gen/
  8x1-minmax-scalar-pipelined.c  (references in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined)
     87  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
     95  vout3 = math_max_f32(vout3, vmin);
    103  output[3] = vout3;
    153  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
    157  vout3 = math_max_f32(vout3, vmin);
    161  output[3] = vout3;
  8x1-minmax-scalar.c  (references in xnn_f32_spmm_minmax_ukernel_8x1__scalar)
    144  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
    152  vout3 = math_max_f32(vout3, vmin);
    160  output[3] = vout3;
    241  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
    245  vout3 = math_max_f32(vout3, vmin);
    249  output[3] = vout3;
  4x1-minmax-scalar-pipelined.c  (references in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined)
     71  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
     75  vout3 = math_max_f32(vout3, vmin);
     79  output[3] = vout3;
  8x2-minmax-scalar.c  (references in xnn_f32_spmm_minmax_ukernel_8x2__scalar)
    186  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
    194  vout3 = math_max_f32(vout3, vmin);
    202  output[3] = vout3;
    305  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
    309  vout3 = math_max_f32(vout3, vmin);
    313  output[3] = vout3;
  4x1-minmax-scalar.c  (references in xnn_f32_spmm_minmax_ukernel_4x1__scalar)
    104  float vout3 = math_min_f32(vacc3, vmax);    [local definition]
    108  vout3 = math_max_f32(vout3, vmin);
    112  output[3] = vout3;
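Every f32 SpMM variant above finishes a row block the same way: the accumulator is clamped to [vmin, vmax], with the min applied first and the max second, matching the order in the snippets, and then stored. A minimal sketch of one output element of a sparse-times-dense product with that clamp; the plain CSR-style indexing here is a simplification, since XNNPACK uses its own packed block-sparse layout:

```c
#include <stddef.h>
#include <stdint.h>

// One output element of a sparse row dotted with a dense input column,
// followed by the min/max output clamp used by the SpMM microkernels.
static float spmm_dot_clamp(const float* nonzero_weights, const int32_t* column_index,
                            size_t nnz, const float* input, float vmin, float vmax) {
  float vacc = 0.0f;
  for (size_t k = 0; k < nnz; k++) {
    vacc += nonzero_weights[k] * input[column_index[k]];  // only the stored non-zeros
  }
  float vout = vacc < vmax ? vacc : vmax;  // math_min_f32(vacc, vmax)
  vout = vout > vmin ? vout : vmin;        // math_max_f32(vout, vmin)
  return vout;
}
```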
/external/XNNPACK/src/qu8-gavgpool/gen/
  7x-minmax-fp32-scalar-imagic-c4.c  (references in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4)
    142  int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);    [local definition]
    147  vout3 = math_max_s32(vout3, vmagic_min);
    152  vout3 = math_min_s32(vout3, vmagic_max);
    157  vout3 -= vmagic_bias_less_zero_point;
    162  output[3] = (uint8_t) vout3;
  7p7x-minmax-fp32-scalar-imagic-c4.c  (references in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4)
    320  int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);    [local definition]
    325  vout3 = math_max_s32(vout3, vmagic_min);
    330  vout3 = math_min_s32(vout3, vmagic_max);
    335  vout3 -= vmagic_bias_less_zero_point;
    340  output[3] = (uint8_t) vout3;
/external/XNNPACK/src/qs8-gavgpool/gen/
  7x-minmax-fp32-scalar-imagic-c4.c  (references in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4)
    142  int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);    [local definition]
    147  vout3 = math_max_s32(vout3, vmagic_min);
    152  vout3 = math_min_s32(vout3, vmagic_max);
    157  vout3 -= vmagic_bias_less_zero_point;
    162  output[3] = (int8_t) vout3;
  7p7x-minmax-fp32-scalar-imagic-c4.c  (references in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4)
    320  int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);    [local definition]
    325  vout3 = math_max_s32(vout3, vmagic_min);
    330  vout3 = math_min_s32(vout3, vmagic_max);
    335  vout3 -= vmagic_bias_less_zero_point;
    340  output[3] = (int8_t) vout3;
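The imagic kernels, here and in the dwconv entries below, quantize a float accumulator without an explicit float-to-int conversion by using the magic-bias trick: adding 1.5 * 2^23 (12582912.0f) places the rounded integer value in the low mantissa bits, so reinterpreting the float's bits as an int32 yields the value plus a constant; the clamp is done on those biased bits, and the constant, less the output zero point, is subtracted at the end. A hedged sketch with the bounds written out explicitly rather than taken from XNNPACK's params structs (full [0, 255] output range assumed):

```c
#include <stdint.h>
#include <string.h>

// Bit-reinterpretation helper, as XNNPACK's float_as_uint32 in the snippets above.
static inline uint32_t float_as_uint32(float f) {
  uint32_t bits;
  memcpy(&bits, &f, sizeof(bits));
  return bits;
}

// Quantize a scaled float accumulator to uint8 via the magic-bias ("imagic") trick.
static uint8_t quantize_imagic_u8(float vfpacc, int32_t output_zero_point) {
  const float vmagic_bias = 12582912.0f;  // 1.5 * 2^23
  // Clamp bounds, expressed in the same biased-bit space as the reinterpreted value.
  const int32_t vmagic_min =
      (int32_t) float_as_uint32(vmagic_bias + (float) (0 - output_zero_point));
  const int32_t vmagic_max =
      (int32_t) float_as_uint32(vmagic_bias + (float) (255 - output_zero_point));
  const int32_t vmagic_bias_less_zero_point =
      (int32_t) float_as_uint32(vmagic_bias) - output_zero_point;  // 0x4B400000 - zero_point

  vfpacc += vmagic_bias;                             // rounds and packs the integer into the mantissa
  int32_t vout = (int32_t) float_as_uint32(vfpacc);  // reinterpret the bits, do not convert
  vout = vout < vmagic_min ? vmagic_min : vout;      // math_max_s32(vout, vmagic_min)
  vout = vout > vmagic_max ? vmagic_max : vout;      // math_min_s32(vout, vmagic_max)
  vout -= vmagic_bias_less_zero_point;               // remove the bias, re-add the zero point
  return (uint8_t) vout;
}
```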
/external/XNNPACK/src/qs8-dwconv/gen/
  up4x9-minmax-fp32-scalar-imagic.c  (references in xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic)
    257  int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);    [local definition]
    262  vout3 = math_max_s32(vout3, vmagic_min);
    267  vout3 = math_min_s32(vout3, vmagic_max);
    272  vout3 -= vmagic_bias_less_zero_point;
    277  output[3] = (int8_t) vout3;
/external/XNNPACK/src/qc8-dwconv/gen/
  up4x9-minmax-fp32-scalar-imagic.c  (references in xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic)
    262  int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);    [local definition]
    267  vout3 = math_max_s32(vout3, vmagic_min);
    272  vout3 = math_min_s32(vout3, vmagic_max);
    277  vout3 -= vmagic_bias_less_zero_point;
    282  output[3] = (int8_t) vout3;
/external/XNNPACK/src/s16-vlshift/gen/
  scalar-x4.c  (references in xnn_s16_vlshift_ukernel__scalar_x4)
     39  const uint16_t vout3 = vi3 << shift;    [local definition]
     44  output[3] = (int16_t) vout3;
  neon-x32.c  (references in xnn_s16_vlshift_ukernel__neon_x32)
     42  const int16x8_t vout3 = vshlq_s16(vi3, vshift);    [local definition]
     47  vst1q_s16(output, vout3); output += 8;
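The vlshift kernels shift int16 samples left by a runtime amount. The scalar variant performs the shift in uint16 so that shifting a negative sample stays well defined, then casts back to int16; the NEON variant shifts eight lanes at a time with vshlq_s16. A short hedged sketch of both forms; the loop structure is simplified relative to the unrolled generated kernels, and the NEON path assumes ARM NEON is available:

```c
#include <stddef.h>
#include <stdint.h>

// Scalar form: shift in uint16, then reinterpret back to int16 (as in scalar-x4.c above).
static void s16_vlshift_scalar(const int16_t* input, int16_t* output,
                               size_t n, uint32_t shift) {
  for (size_t i = 0; i < n; i++) {
    const uint16_t vout = (uint16_t) ((uint16_t) input[i] << shift);
    output[i] = (int16_t) vout;
  }
}

#if defined(__ARM_NEON) || defined(__ARM_NEON__)
#include <arm_neon.h>

// NEON form: vshlq_s16 shifts each lane left by the amount in the corresponding
// lane of vshift (positive amounts shift left).
static void s16_vlshift_neon(const int16_t* input, int16_t* output,
                             size_t n, uint32_t shift) {
  const int16x8_t vshift = vdupq_n_s16((int16_t) shift);
  size_t i = 0;
  for (; i + 8 <= n; i += 8) {
    const int16x8_t vi = vld1q_s16(input + i);
    vst1q_s16(output + i, vshlq_s16(vi, vshift));
  }
  for (; i < n; i++) {  // scalar remainder
    output[i] = (int16_t) (uint16_t) ((uint16_t) input[i] << shift);
  }
}
#endif
```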
/external/XNNPACK/src/qu8-dwconv/gen/
  up4x9-minmax-fp32-scalar-imagic.c  (references in xnn_qu8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic)
    258  int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);    [local definition]
    263  vout3 = math_max_s32(vout3, vmagic_min);
    268  vout3 = math_min_s32(vout3, vmagic_max);
    273  vout3 -= vmagic_bias_less_zero_point;
    278  output[3] = (uint8_t) vout3;