/external/XNNPACK/src/qu8-requantization/
D | fp32-scalar-magic.c | xnn_qu8_requantize_fp32__scalar_magic() [local]
      56: const int32_t w_biased = (int32_t) fp32_to_bits(w_clamped + fmagic) - imagic;
      61: output[3] = (uint8_t) w_biased;
|
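The fp32-*-magic kernels round by exploiting the IEEE-754 layout: adding fmagic = 12582912.0f (0x1.8p+23) pushes the fraction out of the mantissa, so the rounded integer can be read straight from the float's bit pattern, and the zero point is folded into the imagic subtrahend. Below is a minimal standalone sketch of the trick; the constants match what the snippet above implies, but requantize_one_magic and its parameter list are illustrative rather than the XNNPACK API, and the real kernels also take qmin/qmax bounds that this sketch fixes at the full uint8_t range.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Reinterpret float bits as uint32_t (same role as XNNPACK's fp32_to_bits). */
    static uint32_t fp32_to_bits(float f) {
      uint32_t bits;
      memcpy(&bits, &f, sizeof bits);
      return bits;
    }

    /* Hypothetical one-element qu8 requantization using the magic-bias trick. */
    static uint8_t requantize_one_magic(int32_t acc, float scale, uint8_t zero_point) {
      const float fmagic = 12582912.0f;  /* 0x1.8p+23 = 2^23 + 2^22, bits 0x4B400000 */
      const int32_t imagic = INT32_C(0x4B400000) - (int32_t) zero_point;
      const float fmin = -(float) zero_point;          /* maps to quantized 0   */
      const float fmax = (float) (255 - zero_point);   /* maps to quantized 255 */

      const float scaled = (float) acc * scale;
      const float clamped = scaled < fmin ? fmin : scaled > fmax ? fmax : scaled;
      /* Adding fmagic rounds to nearest and parks the integer in the low mantissa
         bits; subtracting imagic strips the magic exponent and adds the zero point. */
      return (uint8_t) ((int32_t) fp32_to_bits(clamped + fmagic) - imagic);
    }

    int main(void) {
      printf("%d\n", requantize_one_magic(1234, 0.05f, 128));  /* 61.7 -> 62 -> 190 */
      return 0;
    }

The same bias-and-reinterpret sequence shows up vectorized in the fp32-wasmsimd and fp32-neon entries below.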
D | fp32-scalar-lrintf.c | xnn_qu8_requantize_fp32__scalar_lrintf() [local]
      59: const int32_t w_biased = w_clamped + (int32_t)(uint32_t) zero_point;
      64: output[3] = (uint8_t) w_biased;
|
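The fp32-*-lrintf kernels rely on the C library instead: lrintf() rounds in the current rounding mode (round-to-nearest-even by default), the result is clamped in the integer domain, and the zero point is added back last, which is the w_clamped + zero_point line shown above. A sketch under those assumptions, with an illustrative name and parameter list:

    #include <stdint.h>
    #include <math.h>

    /* Hypothetical one-element qu8 requantization via lrintf(), mirroring the
       scale -> round -> clamp -> bias structure of the fp32-scalar-lrintf kernel. */
    static uint8_t requantize_one_lrintf(int32_t acc, float scale, uint8_t zero_point) {
      const long rounded = lrintf((float) acc * scale);  /* ties-to-even by default */
      const long lo = -(long) zero_point;                /* biased result >= 0      */
      const long hi = (long) (255 - zero_point);         /* biased result <= 255    */
      const long clamped = rounded < lo ? lo : rounded > hi ? hi : rounded;
      return (uint8_t) ((int32_t) clamped + (int32_t) zero_point);
    }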
D | fp32-wasmsimd.c | xnn_qu8_requantize_fp32__wasmsimd() [local]
      68: const v128_t w_biased = wasm_i32x4_sub(wasm_f32x4_add(w_clamped, vfmagic), vimagic);
      73: const v128_t zw_packed = wasm_v16x8_shuffle(z_biased, w_biased, 0, 2, 4, 6, 8, 10, 12, 14);
|
D | precise-scalar-signed64.c | xnn_qu8_requantize_precise__scalar_signed64() [local]
      87: const int32_t w_biased = w_clamped + zero_point;
      92: output[3] = (uint8_t) w_biased;
|
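The precise-* kernels use no floating point at runtime: the scale is pre-decomposed into a 32-bit multiplier plus a right shift, the product is formed exactly in 64 bits, and it is rounded to nearest with ties away from zero before clamping and re-biasing. A simplified sketch of the signed64 flavour; the multiplier/shift decomposition is omitted, the names are illustrative, and signed >> is assumed to behave as an arithmetic shift (the kernels wrap this in portable helpers):

    #include <stdint.h>

    /* Hypothetical fixed-point requantization step: acc * multiplier is computed
       exactly in 64 bits, then rounded (ties away from zero) by a right shift.
       Assumes shift >= 1. */
    static uint8_t requantize_one_precise(int32_t acc, int32_t multiplier,
                                          uint32_t shift, uint8_t zero_point) {
      const int64_t product = (int64_t) acc * (int64_t) multiplier;
      /* Adding (1 << (shift - 1)) rounds half up; subtracting 1 first for negative
         products turns that into round-half-away-from-zero. */
      const int64_t rounding = INT64_C(1) << (shift - 1);
      const int32_t scaled = (int32_t) ((product + rounding - (product < 0)) >> shift);
      const int32_t lo = -(int32_t) zero_point;
      const int32_t hi = (int32_t) (255 - zero_point);
      const int32_t clamped = scaled < lo ? lo : scaled > hi ? hi : scaled;
      return (uint8_t) (clamped + (int32_t) zero_point);
    }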
D | precise-scalar-unsigned64.c | xnn_qu8_requantize_precise__scalar_unsigned64() [local]
      94: const int32_t w_biased = w_clamped + zero_point;
      99: output[3] = (uint8_t) w_biased;
|
D | fp32-neon.c | xnn_qu8_requantize_fp32__neon() [local]
      99: const int32x4_t w_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(w_clamped, vfmagic)), vimagic);
     104: const int16x8_t zw_packed = vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));
|
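On NEON the tail never clamps scalar-by-scalar: the biased int32x4_t vectors are narrowed pairwise to int16x8_t with vmovn_s32/vcombine_s16, which is the zw_packed line above, and the final narrowing to bytes can use the saturating vqmovun_s16 so the [0, 255] clamp comes for free. A hypothetical tail of such a kernel (ARM targets only, via arm_neon.h):

    #include <arm_neon.h>

    /* Hypothetical packing tail: four biased int32x4_t vectors are narrowed
       pairwise to int16x8_t, then saturated down to uint8x16_t. vqmovun_s16
       clamps to [0, 255], so no separate range check is needed here. */
    static uint8x16_t pack_biased_s32(int32x4_t x, int32x4_t y,
                                      int32x4_t z, int32x4_t w) {
      const int16x8_t xy = vcombine_s16(vmovn_s32(x), vmovn_s32(y));
      const int16x8_t zw = vcombine_s16(vmovn_s32(z), vmovn_s32(w));
      return vcombine_u8(vqmovun_s16(xy), vqmovun_s16(zw));
    }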
D | q31-scalar.c | xnn_qu8_requantize_q31__scalar() [local]
     122: const int32_t w_biased = w_clamped + zero_point;
     127: output[3] = (uint8_t) w_biased;
|
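The q31-* kernels follow the gemmlowp recipe: the scale becomes a Q31 fixed-point multiplier in [0.5, 1.0) plus a right shift, applied as a saturating rounding doubling high multiply (the scalar equivalent of ARM's SQRDMULH) followed by a rounding shift. A scalar sketch of the two primitives with illustrative names; the tie-breaking in rshr_s32 is simplified to round-half-up, whereas the kernel's remainder-based rounding breaks ties away from zero:

    #include <stdint.h>

    /* Scalar SQRDMULH: high 31 bits of a*b with rounding; the only overflow
       case is a == b == INT32_MIN, which saturates. */
    static int32_t sqrdmulh_s32(int32_t a, int32_t b) {
      if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;
      return (int32_t) (((int64_t) a * (int64_t) b + (INT64_C(1) << 30)) >> 31);
    }

    /* Rounding arithmetic right shift, ties rounded up. */
    static int32_t rshr_s32(int32_t x, uint32_t shift) {
      return shift == 0 ? x
           : (int32_t) (((int64_t) x + (INT64_C(1) << (shift - 1))) >> shift);
    }

    /* Hypothetical q31 requantization of one value. */
    static uint8_t requantize_one_q31(int32_t acc, int32_t q31_multiplier,
                                      uint32_t shift, uint8_t zero_point) {
      const int32_t scaled = rshr_s32(sqrdmulh_s32(acc, q31_multiplier), shift);
      const int32_t lo = -(int32_t) zero_point;
      const int32_t hi = (int32_t) (255 - zero_point);
      const int32_t clamped = scaled < lo ? lo : scaled > hi ? hi : scaled;
      return (uint8_t) (clamped + (int32_t) zero_point);
    }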
D | precise-scalar-unsigned32.c | xnn_qu8_requantize_precise__scalar_unsigned32() [local]
     122: const int32_t w_biased = w_clamped + zero_point;
     127: output[3] = (uint8_t) w_biased;
|
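The unsigned32 flavour targets ISAs without a cheap 64-bit multiply: the 64-bit product is rebuilt from 32-bit multiplies of 16-bit halves and the rounding shift is then carried out on the two 32-bit words. A generic sketch of that decomposition; the actual kernel can shave operations by exploiting the known magnitude of its multiplier, and mul_32x32_64 is an illustrative helper, not an XNNPACK function:

    #include <stdint.h>

    /* 32x32 -> 64-bit unsigned multiply built only from 32-bit operations,
       via 16-bit limbs: a*b = hh*2^32 + (lh + hl)*2^16 + ll. */
    static void mul_32x32_64(uint32_t a, uint32_t b, uint32_t* hi, uint32_t* lo) {
      const uint32_t a_lo = a & 0xFFFF, a_hi = a >> 16;
      const uint32_t b_lo = b & 0xFFFF, b_hi = b >> 16;
      const uint32_t ll = a_lo * b_lo;
      const uint32_t lh = a_lo * b_hi;
      const uint32_t hl = a_hi * b_lo;
      const uint32_t hh = a_hi * b_hi;
      /* Sum the middle 16-bit columns; the total fits in 32 bits (< 3 * 2^16). */
      const uint32_t mid = (ll >> 16) + (lh & 0xFFFF) + (hl & 0xFFFF);
      *lo = (ll & 0xFFFF) | (mid << 16);
      *hi = hh + (lh >> 16) + (hl >> 16) + (mid >> 16);
    }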
/external/XNNPACK/src/qs8-requantization/
D | fp32-scalar-lrintf.c | xnn_qs8_requantize_fp32__scalar_lrintf() [local]
      59: const int32_t w_biased = w_clamped + (int32_t) zero_point;
      64: output[3] = (int8_t) w_biased;
|
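The qs8 kernels are line-for-line mirrors of their qu8 counterparts; as the (int8_t) w_biased stores above show, only the clamp range and the final cast change. A hypothetical signed counterpart of the earlier lrintf sketch:

    #include <stdint.h>
    #include <math.h>

    /* Same structure as requantize_one_lrintf(), but clamped to the int8_t
       range [-128, 127] and cast to a signed byte. */
    static int8_t requantize_one_lrintf_s8(int32_t acc, float scale, int8_t zero_point) {
      const long rounded = lrintf((float) acc * scale);
      const long lo = (long) INT8_MIN - (long) zero_point;
      const long hi = (long) INT8_MAX - (long) zero_point;
      const long clamped = rounded < lo ? lo : rounded > hi ? hi : rounded;
      return (int8_t) ((int32_t) clamped + (int32_t) zero_point);
    }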
D | fp32-scalar-magic.c | xnn_qs8_requantize_fp32__scalar_magic() [local]
      56: const int32_t w_biased = (int32_t) fp32_to_bits(w_clamped + fmagic) - imagic;
      61: output[3] = (int8_t) w_biased;
|
D | fp32-wasmsimd.c | xnn_qs8_requantize_fp32__wasmsimd() [local]
      68: const v128_t w_biased = wasm_i32x4_sub(wasm_f32x4_add(w_clamped, vfmagic), vimagic);
      73: const v128_t zw_packed = wasm_v16x8_shuffle(z_biased, w_biased, 0, 2, 4, 6, 8, 10, 12, 14);
|
D | precise-scalar-signed64.c | xnn_qs8_requantize_precise__scalar_signed64() [local]
      87: const int32_t w_biased = w_clamped + zero_point;
      92: output[3] = (int8_t) w_biased;
|
D | fp32-neon.c | xnn_qs8_requantize_fp32__neon() [local]
     123: const int32x4_t w_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(w_clamped, vfmagic)), vimagic);
     128: const int16x8_t zw_packed = vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));
|
D | precise-scalar-unsigned64.c | xnn_qs8_requantize_precise__scalar_unsigned64() [local]
      94: const int32_t w_biased = w_clamped + zero_point;
      99: output[3] = (int8_t) w_biased;
|
D | q31-scalar.c | xnn_qs8_requantize_q31__scalar() [local]
     122: const int32_t w_biased = w_clamped + zero_point;
     127: output[3] = (int8_t) w_biased;
|
D | precise-scalar-unsigned32.c | xnn_qs8_requantize_precise__scalar_unsigned32() [local]
     122: const int32_t w_biased = w_clamped + zero_point;
     127: output[3] = (int8_t) w_biased;
|