/external/XNNPACK/src/qs8-vmulc/gen/ |
D | minmax-fp32-scalar-x2.c | in xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x2():
    39  float vfpacc0 = (float) vacc0 * vscale;  (local)
    42  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    45  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    48  vfpacc0 += vmagic_bias;
    51  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | minmax-fp32-scalar-x4.c | in xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x4():
    43  float vfpacc0 = (float) vacc0 * vscale;  (local)
    48  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    53  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    58  vfpacc0 += vmagic_bias;
    63  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
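These matches all follow the same fp32 "fmagic" requantization sequence: convert the int32 accumulator to float, multiply by the scale, clamp in output-zero-point-relative space, add a magic bias so the rounded integer lands in the low mantissa bits, then reinterpret the bits and subtract a pre-adjusted constant. A minimal standalone sketch of that sequence follows; the helper name requantize_fmagic and its parameters are reconstructed for illustration, not taken from XNNPACK.

    #include <stdint.h>
    #include <string.h>

    /* Bit-cast a float to uint32_t, as XNNPACK's float_as_uint32() does. */
    static inline uint32_t float_as_uint32(float f) {
      uint32_t u;
      memcpy(&u, &f, sizeof u);
      return u;
    }

    /* Sketch of the "fmagic" path shown above (one output element, int8 output). */
    static int8_t requantize_fmagic(int32_t vacc, float vscale,
                                    int8_t output_zero_point,
                                    int8_t output_min, int8_t output_max) {
      const float vmagic_bias = 12582912.0f;  /* 0x1.8p+23: integer part lands in the mantissa */
      const float voutput_min_less_zero_point =
          (float) ((int32_t) output_min - (int32_t) output_zero_point);
      const float voutput_max_less_zero_point =
          (float) ((int32_t) output_max - (int32_t) output_zero_point);
      const int32_t vmagic_bias_less_output_zero_point =
          (int32_t) float_as_uint32(vmagic_bias) - (int32_t) output_zero_point;

      float vfpacc = (float) vacc * vscale;                 /* scale */
      vfpacc = vfpacc < voutput_min_less_zero_point         /* clamp low  */
                   ? voutput_min_less_zero_point : vfpacc;
      vfpacc = vfpacc > voutput_max_less_zero_point         /* clamp high */
                   ? voutput_max_less_zero_point : vfpacc;
      vfpacc += vmagic_bias;                                /* round via magic bias */
      const int32_t vout =
          (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      return (int8_t) vout;                                 /* already in [output_min, output_max] */
    }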
/external/XNNPACK/src/qu8-vmulc/gen/ |
D | minmax-fp32-scalar-x2.c | in xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x2():
    39  float vfpacc0 = (float) vacc0 * vscale;  (local)
    42  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    45  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    48  vfpacc0 += vmagic_bias;
    51  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | minmax-fp32-scalar-x4.c | in xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x4():
    43  float vfpacc0 = (float) vacc0 * vscale;  (local)
    48  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    53  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    58  vfpacc0 += vmagic_bias;
    63  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
/external/XNNPACK/src/qu8-vmul/gen/ |
D | minmax-fp32-scalar-x2.c | in xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x2():
    43  float vfpacc0 = (float) vacc0 * vscale;  (local)
    46  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    49  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    52  vfpacc0 += vmagic_bias;
    55  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | minmax-fp32-scalar-x4.c | in xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x4():
    49  float vfpacc0 = (float) vacc0 * vscale;  (local)
    54  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    59  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    64  vfpacc0 += vmagic_bias;
    69  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
/external/XNNPACK/src/qs8-vmul/gen/ |
D | minmax-fp32-scalar-x2.c | in xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x2():
    43  float vfpacc0 = (float) vacc0 * vscale;  (local)
    46  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    49  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    52  vfpacc0 += vmagic_bias;
    55  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | minmax-fp32-scalar-x4.c | in xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x4():
    49  float vfpacc0 = (float) vacc0 * vscale;  (local)
    54  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    59  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    64  vfpacc0 += vmagic_bias;
    69  const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
/external/XNNPACK/src/qc8-dwconv/gen/ |
D | up2x3-minmax-fp32-wasm-fmagic.c | in xnn_qc8_dwconv_minmax_fp32_ukernel_up2x3__wasm_fmagic():
    93  float vfpacc0 = (float) vacc0;  (local)
   100  vfpacc0 *= vscale0;
   103  vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
   106  vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
   109  vfpacc0 += vmagic_bias;
   112  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | up2x3-minmax-fp32-scalar-lrintf.c | in xnn_qc8_dwconv_minmax_fp32_ukernel_up2x3__scalar_lrintf():
    93  float vfpacc0 = (float) vacc0;  (local)
   100  vfpacc0 *= vscale0;
   103  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   106  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   109  const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
|
D | up2x9-minmax-fp32-scalar-fmagic.c | in xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic():
   183  float vfpacc0 = (float) vacc0;  (local)
   190  vfpacc0 *= vscale0;
   193  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   196  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   199  vfpacc0 += vmagic_bias;
   202  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | up2x9-minmax-fp32-wasm-fmagic.c | in xnn_qc8_dwconv_minmax_fp32_ukernel_up2x9__wasm_fmagic():
   183  float vfpacc0 = (float) vacc0;  (local)
   190  vfpacc0 *= vscale0;
   193  vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
   196  vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
   199  vfpacc0 += vmagic_bias;
   202  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
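The *-lrintf entries (up2x3-minmax-fp32-scalar-lrintf.c above, and the gavgpool variants below) keep the same scale-and-clamp prologue but round through lrintf() instead of the magic bias. Below is a sketch under the same assumptions as the previous one; the addition of the output zero point after lrintf() is not part of the matched lines and is reconstructed here.

    #include <math.h>    /* lrintf */
    #include <stdint.h>

    /* Sketch of the "-lrintf" rounding path shown above (one output element, int8 output). */
    static int8_t requantize_lrintf(int32_t vacc, float vscale,
                                    int8_t output_zero_point,
                                    int8_t output_min, int8_t output_max) {
      const float voutput_min_less_zero_point =
          (float) ((int32_t) output_min - (int32_t) output_zero_point);
      const float voutput_max_less_zero_point =
          (float) ((int32_t) output_max - (int32_t) output_zero_point);

      float vfpacc = (float) vacc * vscale;                 /* scale */
      vfpacc = vfpacc < voutput_min_less_zero_point         /* clamp low  */
                   ? voutput_min_less_zero_point : vfpacc;
      vfpacc = vfpacc > voutput_max_less_zero_point         /* clamp high */
                   ? voutput_max_less_zero_point : vfpacc;
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);     /* round to nearest, ties to even */
      return (int8_t) (vrndacc + (int32_t) output_zero_point);
    }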
/external/XNNPACK/src/qs8-gavgpool/gen/ |
D | 7x-minmax-fp32-scalar-fmagic-c2.c | in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c2():
   101  float vfpacc0 = (float) vacc0 * vscale;  (local)
   104  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   107  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   110  vfpacc0 += vmagic_bias;
   113  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | 7x-minmax-fp32-scalar-lrintf-c2.c | in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c2():
   101  float vfpacc0 = (float) vacc0 * vscale;  (local)
   104  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   107  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   110  const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
|
D | 7x-minmax-fp32-scalar-fmagic-c4.c | in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c4():
   129  float vfpacc0 = (float) vacc0 * vscale;  (local)
   134  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   139  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   144  vfpacc0 += vmagic_bias;
   149  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | 7x-minmax-fp32-scalar-lrintf-c4.c | in xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c4():
   129  float vfpacc0 = (float) vacc0 * vscale;  (local)
   134  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   139  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   144  const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
|
D | 7p7x-minmax-fp32-scalar-fmagic-c2.c | in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2():
   215  float vfpacc0 = (float) vacc0 * vscale;  (local)
   218  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   221  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   224  vfpacc0 += vmagic_bias;
   227  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
/external/XNNPACK/src/qu8-gavgpool/gen/ |
D | 7x-minmax-fp32-scalar-fmagic-c2.c | in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c2():
   101  float vfpacc0 = (float) vacc0 * vscale;  (local)
   104  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   107  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   110  vfpacc0 += vmagic_bias;
   113  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | 7x-minmax-fp32-scalar-lrintf-c2.c | in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c2():
   101  float vfpacc0 = (float) vacc0 * vscale;  (local)
   104  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   107  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   110  const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
|
D | 7x-minmax-fp32-scalar-fmagic-c4.c | in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c4():
   129  float vfpacc0 = (float) vacc0 * vscale;  (local)
   134  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   139  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   144  vfpacc0 += vmagic_bias;
   149  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | 7x-minmax-fp32-scalar-lrintf-c4.c | in xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c4():
   129  float vfpacc0 = (float) vacc0 * vscale;  (local)
   134  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   139  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   144  const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
|
D | 7p7x-minmax-fp32-scalar-fmagic-c2.c | in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2():
   215  float vfpacc0 = (float) vacc0 * vscale;  (local)
   218  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   221  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   224  vfpacc0 += vmagic_bias;
   227  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
/external/XNNPACK/src/qs8-dwconv/gen/ |
D | up2x9-minmax-fp32-scalar-magic.c | in xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_magic():
   185  float vfpacc0 = (float) vacc0;  (local)
   188  vfpacc0 *= vscale;
   191  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   194  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   197  vfpacc0 += vmagic_bias;
   200  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | up2x9-minmax-fp32-scalar-fmagic.c | in xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__scalar_fmagic():
   184  float vfpacc0 = (float) vacc0;  (local)
   187  vfpacc0 *= vscale;
   190  vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
   193  vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
   196  vfpacc0 += vmagic_bias;
   199  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
D | up2x9-minmax-fp32-wasm-fmagic.c | in xnn_qs8_dwconv_minmax_fp32_ukernel_up2x9__wasm_fmagic():
   184  float vfpacc0 = (float) vacc0;  (local)
   187  vfpacc0 *= vscale;
   190  vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
   193  vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
   196  vfpacc0 += vmagic_bias;
   199  int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
|
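Two further variations are visible in these matches: the *-wasm-fmagic kernels clamp with the __builtin_wasm_max_f32/__builtin_wasm_min_f32 builtins instead of math_max_f32/math_min_f32, and the qc8-dwconv kernels (the channelwise-quantized variant) multiply by a per-channel vscale0, whereas the qs8/qu8 kernels listed here use a single per-tensor vscale.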