/external/XNNPACK/src/s16-window/gen/
D | scalar-x3.c |
    49  int32_t vout2 = (int32_t) vi2 * (int32_t) w2;  in xnn_s16_window_ukernel__scalar_x3() local
    53  vout2 = math_asr_s32(vout2, shift);  in xnn_s16_window_ukernel__scalar_x3()
    57  vout2 = math_max_s32(vout2, INT16_MIN);  in xnn_s16_window_ukernel__scalar_x3()
    61  vout2 = math_min_s32(vout2, INT16_MAX);  in xnn_s16_window_ukernel__scalar_x3()
    65  output[2] = (int16_t) vout2;  in xnn_s16_window_ukernel__scalar_x3()

D | scalar-x4.c |
    51  int32_t vout2 = (int32_t) vi2 * (int32_t) w2;  in xnn_s16_window_ukernel__scalar_x4() local
    56  vout2 = math_asr_s32(vout2, shift);  in xnn_s16_window_ukernel__scalar_x4()
    61  vout2 = math_max_s32(vout2, INT16_MIN);  in xnn_s16_window_ukernel__scalar_x4()
    66  vout2 = math_min_s32(vout2, INT16_MAX);  in xnn_s16_window_ukernel__scalar_x4()
    71  output[2] = (int16_t) vout2;  in xnn_s16_window_ukernel__scalar_x4()
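The references above trace one saturated requantization chain: widen, multiply by the window coefficient, arithmetic-shift, clamp to the int16 range. A minimal sketch of that per-element step, with plain C standing in for XNNPACK's math_asr_s32/math_max_s32/math_min_s32 helpers (the x3/x4 kernels simply unroll it 3 or 4 ways):

    #include <stdint.h>

    static inline int16_t s16_window_step(int16_t vi, int16_t w, uint32_t shift) {
      int32_t vout = (int32_t) vi * (int32_t) w;   // widen so the product cannot overflow
      vout >>= shift;                              // arithmetic shift right (math_asr_s32 in the kernels)
      vout = vout > INT16_MIN ? vout : INT16_MIN;  // math_max_s32
      vout = vout < INT16_MAX ? vout : INT16_MAX;  // math_min_s32
      return (int16_t) vout;
    }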
/external/XNNPACK/src/qs8-vaddc/gen/
D | minmax-scalar-x4.c |
    45  int32_t vout2 = math_asr_s32(vacc2, vshift);  in xnn_qs8_vaddc_minmax_ukernel__scalar_x4() local
    50  vout2 = math_max_s32(vout2, voutput_min_less_zero_point);  in xnn_qs8_vaddc_minmax_ukernel__scalar_x4()
    55  vout2 = math_min_s32(vout2, voutput_max_less_zero_point);  in xnn_qs8_vaddc_minmax_ukernel__scalar_x4()
    60  vout2 += voutput_zero_point;  in xnn_qs8_vaddc_minmax_ukernel__scalar_x4()
    65  output[2] = (int8_t) vout2;  in xnn_qs8_vaddc_minmax_ukernel__scalar_x4()

/external/XNNPACK/src/qu8-vaddc/gen/
D | minmax-scalar-x4.c |
    45  int32_t vout2 = math_asr_s32(vacc2, vshift);  in xnn_qu8_vaddc_minmax_ukernel__scalar_x4() local
    50  vout2 = math_max_s32(vout2, voutput_min_less_zero_point);  in xnn_qu8_vaddc_minmax_ukernel__scalar_x4()
    55  vout2 = math_min_s32(vout2, voutput_max_less_zero_point);  in xnn_qu8_vaddc_minmax_ukernel__scalar_x4()
    60  vout2 += voutput_zero_point;  in xnn_qu8_vaddc_minmax_ukernel__scalar_x4()
    65  output[2] = (uint8_t) vout2;  in xnn_qu8_vaddc_minmax_ukernel__scalar_x4()
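Both vaddc variants share the same requantization tail: shift the widened accumulator, clamp against the output bounds expressed relative to the zero point, then re-bias. A sketch of that tail for the qs8 case (parameter names mirror the generated code; the qu8 version only changes the final cast):

    #include <stdint.h>

    static inline int8_t vaddc_requant(int32_t vacc, uint32_t vshift,
                                       int32_t voutput_min_less_zero_point,
                                       int32_t voutput_max_less_zero_point,
                                       int32_t voutput_zero_point) {
      int32_t vout = vacc >> vshift;  // math_asr_s32
      if (vout < voutput_min_less_zero_point) vout = voutput_min_less_zero_point;
      if (vout > voutput_max_less_zero_point) vout = voutput_max_less_zero_point;
      // Clamping before re-biasing lets both bounds be precomputed as
      // (output_min - zero_point) and (output_max - zero_point).
      return (int8_t) (vout + voutput_zero_point);
    }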
/external/XNNPACK/src/qs8-vcvt/gen/
D | vcvt-scalar-x4.c |
    38  int32_t vout2 = math_asr_s32(vacc2, 8);  in xnn_qs8_vcvt_ukernel__scalar_x4() local
    43  vout2 = math_max_s32(vout2, -128);  in xnn_qs8_vcvt_ukernel__scalar_x4()
    48  vout2 = math_min_s32(vout2, 127);  in xnn_qs8_vcvt_ukernel__scalar_x4()
    53  y[2] = (int8_t) vout2;  in xnn_qs8_vcvt_ukernel__scalar_x4()

/external/XNNPACK/src/qu8-vcvt/gen/
D | vcvt-scalar-x4.c |
    38  int32_t vout2 = math_asr_s32(vacc2, 8);  in xnn_qu8_vcvt_ukernel__scalar_x4() local
    43  vout2 = math_max_s32(vout2, 0);  in xnn_qu8_vcvt_ukernel__scalar_x4()
    48  vout2 = math_min_s32(vout2, 255);  in xnn_qu8_vcvt_ukernel__scalar_x4()
    53  y[2] = (uint8_t) vout2;  in xnn_qu8_vcvt_ukernel__scalar_x4()
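In the vcvt kernels the shift count is the constant 8: as I read the scalar variants, vacc carries 8 fixed-point fraction bits, so the shift drops the fraction before the result is saturated to the full range of the target type. A sketch of the qs8 tail (the qu8 variant clamps to [0, 255] instead):

    #include <stdint.h>

    static inline int8_t qs8_vcvt_tail(int32_t vacc) {
      int32_t vout = vacc >> 8;          // drop the 8 fraction bits (math_asr_s32)
      vout = vout < -128 ? -128 : vout;  // saturate to the int8 range
      vout = vout > 127 ? 127 : vout;
      return (int8_t) vout;
    }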
/external/XNNPACK/src/qu8-vadd/gen/
D | minmax-scalar-x4.c |
    55  int32_t vout2 = math_asr_s32(vacc2, vshift);  in xnn_qu8_vadd_minmax_ukernel__scalar_x4() local
    60  vout2 = math_max_s32(vout2, voutput_min_less_zero_point);  in xnn_qu8_vadd_minmax_ukernel__scalar_x4()
    65  vout2 = math_min_s32(vout2, voutput_max_less_zero_point);  in xnn_qu8_vadd_minmax_ukernel__scalar_x4()
    70  vout2 += voutput_zero_point;  in xnn_qu8_vadd_minmax_ukernel__scalar_x4()
    75  output[2] = (uint8_t) vout2;  in xnn_qu8_vadd_minmax_ukernel__scalar_x4()

/external/XNNPACK/src/qs8-vadd/gen/
D | minmax-scalar-x4.c |
    55  int32_t vout2 = math_asr_s32(vacc2, vshift);  in xnn_qs8_vadd_minmax_ukernel__scalar_x4() local
    60  vout2 = math_max_s32(vout2, voutput_min_less_zero_point);  in xnn_qs8_vadd_minmax_ukernel__scalar_x4()
    65  vout2 = math_min_s32(vout2, voutput_max_less_zero_point);  in xnn_qs8_vadd_minmax_ukernel__scalar_x4()
    70  vout2 += voutput_zero_point;  in xnn_qs8_vadd_minmax_ukernel__scalar_x4()
    75  output[2] = (int8_t) vout2;  in xnn_qs8_vadd_minmax_ukernel__scalar_x4()
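The vadd kernels end in the same requantization tail as vaddc above; the difference is that vacc sums two scaled inputs rather than an input and a broadcast constant. A hedged sketch of the two-input accumulation as I read the generated scalar code (treat the exact parameter names as assumptions):

    #include <stdint.h>

    static inline int32_t qs8_vadd_acc(int8_t va, int8_t vb, int32_t vbias,
                                       int32_t va_multiplier, int32_t vb_multiplier) {
      // vbias folds both zero-point corrections into one constant;
      // each input gets its own fixed-point multiplier.
      return vbias + (int32_t) va * va_multiplier + (int32_t) vb * vb_multiplier;
    }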
/external/XNNPACK/src/qu8-vlrelu/gen/
D | vlrelu-scalar-select-x4.c |
    50  int32_t vout2 = math_asr_s32(vacc2, 8);  in xnn_qu8_vlrelu_ukernel__scalar_select_x4() local
    55  vout2 = math_max_s32(vout2, 0);  in xnn_qu8_vlrelu_ukernel__scalar_select_x4()
    60  vout2 = math_min_s32(vout2, 255);  in xnn_qu8_vlrelu_ukernel__scalar_select_x4()
    65  y[2] = (uint8_t) vout2;  in xnn_qu8_vlrelu_ukernel__scalar_select_x4()

D | vlrelu-scalar-andxor-x4.c |
    60  int32_t vout2 = math_asr_s32(vacc2, 8);  in xnn_qu8_vlrelu_ukernel__scalar_andxor_x4() local
    65  vout2 = math_max_s32(vout2, 0);  in xnn_qu8_vlrelu_ukernel__scalar_andxor_x4()
    70  vout2 = math_min_s32(vout2, 255);  in xnn_qu8_vlrelu_ukernel__scalar_andxor_x4()
    75  y[2] = (uint8_t) vout2;  in xnn_qu8_vlrelu_ukernel__scalar_andxor_x4()

/external/XNNPACK/src/qs8-vlrelu/gen/
D | vlrelu-scalar-select-x4.c |
    50  int32_t vout2 = math_asr_s32(vacc2, 8);  in xnn_qs8_vlrelu_ukernel__scalar_select_x4() local
    55  vout2 = math_max_s32(vout2, -128);  in xnn_qs8_vlrelu_ukernel__scalar_select_x4()
    60  vout2 = math_min_s32(vout2, 127);  in xnn_qs8_vlrelu_ukernel__scalar_select_x4()
    65  y[2] = (int8_t) vout2;  in xnn_qs8_vlrelu_ukernel__scalar_select_x4()

D | vlrelu-scalar-andxor-x4.c |
    60  int32_t vout2 = math_asr_s32(vacc2, 8);  in xnn_qs8_vlrelu_ukernel__scalar_andxor_x4() local
    65  vout2 = math_max_s32(vout2, -128);  in xnn_qs8_vlrelu_ukernel__scalar_andxor_x4()
    70  vout2 = math_min_s32(vout2, 127);  in xnn_qs8_vlrelu_ukernel__scalar_andxor_x4()
    75  y[2] = (int8_t) vout2;  in xnn_qs8_vlrelu_ukernel__scalar_andxor_x4()
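The listed tail is identical across the vlrelu variants; what differs (and is not shown above) is how vacc is formed. Both apply a leaky ReLU in the quantized domain by picking a multiplier from the sign of the input relative to the input zero point: the "select" files use a conditional move, the "andxor" files derive the same choice branchlessly from a sign mask. A hedged sketch of the select flavor for qs8 (names follow the generated code as I recall it):

    #include <stdint.h>

    static inline int8_t qs8_vlrelu_step(int8_t vx, int32_t vinput_zero_point,
                                         int32_t vpositive_multiplier,
                                         int32_t vnegative_multiplier, int32_t vbias) {
      int32_t vacc = (int32_t) vx - vinput_zero_point;
      // "select": pick the slope by sign; "andxor" builds the same value from a mask.
      const int32_t vmultiplier = vacc >= 0 ? vpositive_multiplier : vnegative_multiplier;
      vacc = vbias + vacc * vmultiplier;
      int32_t vout = vacc >> 8;          // 8 fixed-point fraction bits
      vout = vout < -128 ? -128 : vout;  // the qu8 variant clamps to [0, 255]
      vout = vout > 127 ? 127 : vout;
      return (int8_t) vout;
    }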
/external/XNNPACK/src/f32-spmm/gen/
D | 8x1-minmax-scalar-pipelined.c |
    86  float vout2 = math_min_f32(vacc2, vmax);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
    94  vout2 = math_max_f32(vout2, vmin);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined()
    102 output[2] = vout2;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined()
    152 float vout2 = math_min_f32(vacc2, vmax);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
    156 vout2 = math_max_f32(vout2, vmin);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined()
    160 output[2] = vout2;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined()

D | 8x1-minmax-scalar.c |
    143 float vout2 = math_min_f32(vacc2, vmax);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
    151 vout2 = math_max_f32(vout2, vmin);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
    159 output[2] = vout2;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
    240 float vout2 = math_min_f32(vacc2, vmax);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
    244 vout2 = math_max_f32(vout2, vmin);  in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
    248 output[2] = vout2;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar()

D | 4x1-minmax-scalar-pipelined.c |
    70  float vout2 = math_min_f32(vacc2, vmax);  in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined() local
    74  vout2 = math_max_f32(vout2, vmin);  in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined()
    78  output[2] = vout2;  in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined()

D | 8x2-minmax-scalar.c |
    185 float vout2 = math_min_f32(vacc2, vmax);  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
    193 vout2 = math_max_f32(vout2, vmin);  in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
    201 output[2] = vout2;  in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
    304 float vout2 = math_min_f32(vacc2, vmax);  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
    308 vout2 = math_max_f32(vout2, vmin);  in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
    312 output[2] = vout2;  in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
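Every f32-spmm reference above is the same two-step epilogue; it appears twice per file because each kernel clamps in both its main loop and its remainder loop. The accumulator is clamped to [vmin, vmax], upper bound first. A minimal sketch with fminf/fmaxf standing in for math_min_f32/math_max_f32 (NaN handling may differ from the library helpers):

    #include <math.h>

    static inline float f32_minmax_clamp(float vacc, float vmin, float vmax) {
      float vout = fminf(vacc, vmax);  // upper bound first, as in the kernels
      return fmaxf(vout, vmin);        // then the lower bound
    }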
/external/XNNPACK/src/qu8-gavgpool/gen/
D | 7x-minmax-fp32-scalar-imagic-c4.c |
    141 int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4() local
    146 vout2 = math_max_s32(vout2, vmagic_min);  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()
    151 vout2 = math_min_s32(vout2, vmagic_max);  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()
    156 vout2 -= vmagic_bias_less_zero_point;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()
    161 output[2] = (uint8_t) vout2;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()

D | 7p7x-minmax-fp32-scalar-imagic-c4.c |
    319 int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4() local
    324 vout2 = math_max_s32(vout2, vmagic_min);  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()
    329 vout2 = math_min_s32(vout2, vmagic_max);  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()
    334 vout2 -= vmagic_bias_less_zero_point;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()
    339 output[2] = (uint8_t) vout2;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()

/external/XNNPACK/src/qs8-gavgpool/gen/
D | 7x-minmax-fp32-scalar-imagic-c4.c |
    141 int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4() local
    146 vout2 = math_max_s32(vout2, vmagic_min);  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()
    151 vout2 = math_min_s32(vout2, vmagic_max);  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()
    156 vout2 -= vmagic_bias_less_zero_point;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()
    161 output[2] = (int8_t) vout2;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4()

D | 7p7x-minmax-fp32-scalar-imagic-c4.c |
    319 int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4() local
    324 vout2 = math_max_s32(vout2, vmagic_min);  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()
    329 vout2 = math_min_s32(vout2, vmagic_max);  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()
    334 vout2 -= vmagic_bias_less_zero_point;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()
    339 output[2] = (int8_t) vout2;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4()
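These "imagic" tails convert a float accumulator to a quantized value without an explicit float-to-int instruction: earlier in the kernel (not shown in this index) a magic bias, a large power-of-two constant folded with the output zero point, is added to vfpacc, which leaves the rounded integer sitting in the low mantissa bits. The tail then reinterprets the float's bits, clamps while still in the biased domain, and subtracts the bias. A sketch of that tail (qu8 shown; the description of the bias is my reading of the technique):

    #include <stdint.h>
    #include <string.h>

    static inline uint32_t float_as_uint32(float f) {
      uint32_t u;
      memcpy(&u, &f, sizeof u);  // bit-exact reinterpretation without UB
      return u;
    }

    static inline uint8_t imagic_tail(float vfpacc,  // already carries the magic bias
                                      int32_t vmagic_min, int32_t vmagic_max,
                                      int32_t vmagic_bias_less_zero_point) {
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = vout < vmagic_min ? vmagic_min : vout;  // clamp in the biased integer domain
      vout = vout > vmagic_max ? vmagic_max : vout;
      vout -= vmagic_bias_less_zero_point;           // unbias and apply the zero point
      return (uint8_t) vout;
    }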
/external/XNNPACK/src/s16-vlshift/gen/
D | scalar-x3.c |
    37  const uint16_t vout2 = vi2 << shift;  in xnn_s16_vlshift_ukernel__scalar_x3() local
    41  output[2] = (int16_t) vout2;  in xnn_s16_vlshift_ukernel__scalar_x3()

D | scalar-x4.c |
    38  const uint16_t vout2 = vi2 << shift;  in xnn_s16_vlshift_ukernel__scalar_x4() local
    43  output[2] = (int16_t) vout2;  in xnn_s16_vlshift_ukernel__scalar_x4()
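The vlshift kernels shift the 16-bit lanes as unsigned values, so left-shifting what is logically a negative input stays well defined in C, and then store the resulting bit pattern back as int16_t. The whole element step fits in two lines:

    #include <stdint.h>

    static inline int16_t s16_vlshift_step(uint16_t vi, uint32_t shift) {
      const uint16_t vout = vi << shift;  // shift on the unsigned bit pattern
      return (int16_t) vout;              // reinterpret as signed for the store
    }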
/external/XNNPACK/src/qs8-dwconv/gen/
D | up4x9-minmax-fp32-scalar-imagic.c |
    256 int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);  in xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic() local
    261 vout2 = math_max_s32(vout2, vmagic_min);  in xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()
    266 vout2 = math_min_s32(vout2, vmagic_max);  in xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()
    271 vout2 -= vmagic_bias_less_zero_point;  in xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()
    276 output[2] = (int8_t) vout2;  in xnn_qs8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()

/external/XNNPACK/src/qc8-dwconv/gen/
D | up4x9-minmax-fp32-scalar-imagic.c |
    261 int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic() local
    266 vout2 = math_max_s32(vout2, vmagic_min);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()
    271 vout2 = math_min_s32(vout2, vmagic_max);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()
    276 vout2 -= vmagic_bias_less_zero_point;  in xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()
    281 output[2] = (int8_t) vout2;  in xnn_qc8_dwconv_minmax_fp32_ukernel_up4x9__scalar_imagic()
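The dwconv kernels reuse the imagic tail sketched after the gavgpool group above, so the remaining question is where the vmagic_* constants come from. The sketch below is how I understand they are derived from the output quantization; treat the exact derivation as an assumption, though 0x1.8p+23 is the usual magic bias for this trick:

    #include <stdint.h>
    #include <string.h>

    static void imagic_params(int8_t output_min, int8_t output_max, int8_t output_zero_point,
                              int32_t* vmagic_min, int32_t* vmagic_max,
                              int32_t* vmagic_bias_less_zero_point) {
      const float vmagic_bias = 12582912.0f;  // 0x1.8p+23: parks small integers in the mantissa
      const float fmin = vmagic_bias + (float) ((int32_t) output_min - output_zero_point);
      const float fmax = vmagic_bias + (float) ((int32_t) output_max - output_zero_point);
      memcpy(vmagic_min, &fmin, sizeof fmin);  // clamp bounds as biased bit patterns
      memcpy(vmagic_max, &fmax, sizeof fmax);
      // 0x4B400000 is the bit pattern of vmagic_bias itself.
      *vmagic_bias_less_zero_point = INT32_C(0x4B400000) - (int32_t) output_zero_point;
    }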
/external/XNNPACK/src/qs8-igemm/gen/
D | 3x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c |
    168 uint32_t vout2 = wasm_i32x4_extract_lane(vout, 2);  in xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld128() local
    172 *((uint16_t*) c2) = (uint16_t) vout2;  in xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld128()
    173 vout2 >>= 16;  in xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld128()
    183 *c2 = (int8_t) vout2;  in xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld128()
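Here vout2 is not an accumulator but a packed result: lane 2 of the SIMD vector holds the four int8 outputs for output row 2, and the remainder path peels bytes off it. A scalar sketch of that tail store for nc == 3 (the real kernel branches on nc & 2 and nc & 1; memcpy stands in for the kernel's unaligned uint16_t store):

    #include <stdint.h>
    #include <string.h>

    static void igemm_tail_store_nc3(int8_t* c2, uint32_t vout2) {
      const uint16_t lo = (uint16_t) vout2;  // low two bytes of the packed lane
      memcpy(c2, &lo, sizeof lo);
      vout2 >>= 16;                          // advance to the third byte
      c2[2] = (int8_t) vout2;
    }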