Home
last modified time | relevance | path

Searched refs:y_clamped (Results 1 – 18 of 18) sorted by relevance

/external/XNNPACK/src/qs8-requantization/
Dfp32-scalar-fmagic.c48 const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax); in xnn_qs8_requantize_fp32__scalar_fmagic() local
53 const int32_t y_biased = (int32_t) float_as_uint32(y_clamped + fmagic) - imagic; in xnn_qs8_requantize_fp32__scalar_fmagic()
Dfp32-scalar-lrintf.c46 const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax); in xnn_qs8_requantize_fp32__scalar_lrintf() local
51 const int32_t y_rounded = (int32_t) lrintf(y_clamped); in xnn_qs8_requantize_fp32__scalar_lrintf()
Dfp32-wasmsimd.c59 const v128_t y_clamped = wasm_f32x4_min(wasm_f32x4_max(y_scaled, vfmin), vfmax); in xnn_qs8_requantize_fp32__wasmsimd() local
66 const v128_t y_biased = wasm_i32x4_sub(wasm_f32x4_add(y_clamped, vfmagic), vimagic); in xnn_qs8_requantize_fp32__wasmsimd()
Drndnu-scalar.c67 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qs8_requantize_rndnu__scalar() local
77 const int32_t y_biased = y_clamped + zero_point; in xnn_qs8_requantize_rndnu__scalar()
Drndna-scalar-signed64.c73 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qs8_requantize_rndna__scalar_signed64() local
83 const int32_t y_biased = y_clamped + zero_point; in xnn_qs8_requantize_rndna__scalar_signed64()
Drndna-scalar-unsigned64.c80 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qs8_requantize_rndna__scalar_unsigned64() local
90 const int32_t y_biased = y_clamped + zero_point; in xnn_qs8_requantize_rndna__scalar_unsigned64()
Dfp32-neon.c115 const float32x4_t y_clamped = vminq_f32(vmaxq_f32(y_scaled, vfmin), vfmax); in xnn_qs8_requantize_fp32__neon() local
122 const int32x4_t y_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(y_clamped, vfmagic)), vimagic); in xnn_qs8_requantize_fp32__neon()
Dgemmlowp-scalar.c108 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qs8_requantize_gemmlowp__scalar() local
118 const int32_t y_biased = y_clamped + zero_point; in xnn_qs8_requantize_gemmlowp__scalar()
Drndna-scalar-unsigned32.c108 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qs8_requantize_rndna__scalar_unsigned32() local
118 const int32_t y_biased = y_clamped + zero_point; in xnn_qs8_requantize_rndna__scalar_unsigned32()
/external/XNNPACK/src/qu8-requantization/
Dfp32-scalar-fmagic.c48 const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax); in xnn_qu8_requantize_fp32__scalar_fmagic() local
53 const int32_t y_biased = (int32_t) float_as_uint32(y_clamped + fmagic) - imagic; in xnn_qu8_requantize_fp32__scalar_fmagic()
Dfp32-scalar-lrintf.c46 const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax); in xnn_qu8_requantize_fp32__scalar_lrintf() local
51 const int32_t y_rounded = (int32_t) lrintf(y_clamped); in xnn_qu8_requantize_fp32__scalar_lrintf()
Dfp32-wasmsimd.c59 const v128_t y_clamped = wasm_f32x4_min(wasm_f32x4_max(y_scaled, vfmin), vfmax); in xnn_qu8_requantize_fp32__wasmsimd() local
66 const v128_t y_biased = wasm_i32x4_sub(wasm_f32x4_add(y_clamped, vfmagic), vimagic); in xnn_qu8_requantize_fp32__wasmsimd()
Drndna-scalar-signed64.c73 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qu8_requantize_rndna__scalar_signed64() local
83 const int32_t y_biased = y_clamped + zero_point; in xnn_qu8_requantize_rndna__scalar_signed64()
Drndna-scalar-unsigned64.c80 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qu8_requantize_rndna__scalar_unsigned64() local
90 const int32_t y_biased = y_clamped + zero_point; in xnn_qu8_requantize_rndna__scalar_unsigned64()
Dfp32-neon.c91 const float32x4_t y_clamped = vminq_f32(vmaxq_f32(y_scaled, vfmin), vfmax); in xnn_qu8_requantize_fp32__neon() local
98 const int32x4_t y_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(y_clamped, vfmagic)), vimagic); in xnn_qu8_requantize_fp32__neon()
Dgemmlowp-scalar.c108 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qu8_requantize_gemmlowp__scalar() local
118 const int32_t y_biased = y_clamped + zero_point; in xnn_qu8_requantize_gemmlowp__scalar()
Drndna-scalar-unsigned32.c108 const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax); in xnn_qu8_requantize_rndna__scalar_unsigned32() local
118 const int32_t y_biased = y_clamped + zero_point; in xnn_qu8_requantize_rndna__scalar_unsigned32()
/external/tensorflow/tensorflow/core/kernels/
Dcwise_ops.h1225 T y_clamped = y;
1226 if (y_clamped < 0) {
1227 y_clamped = 0;
1228 } else if (y_clamped > sizeof(T) * CHAR_BIT - 1) {
1229 y_clamped = sizeof(T) * CHAR_BIT - 1;
1232 return static_cast<T>(static_cast<U>(x) << static_cast<U>(y_clamped));
1241 T y_clamped = y;
1242 if (y_clamped < 0) {
1243 y_clamped = 0;
1244 } else if (y_clamped > sizeof(T) * CHAR_BIT - 1) {
[all …]