/external/XNNPACK/src/qs8-requantization/
rndnu-neon-mull.c | 83 | const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled)); | xnn_qs8_requantize_rndnu__neon_mull() | local
rndnu-neon-mull.c | 92 | const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled)); | xnn_qs8_requantize_rndnu__neon_mull() | local
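The two matches above are alternative narrowings of the same pair of 64-bit scaled products: both keep the low 32 bits of each 64-bit lane. A minimal sketch of the pair, assuming little-endian AArch64 for the vuzp1q_s32 path (the helper name narrow_scaled_products is illustrative):

```c
#include <arm_neon.h>

// Narrow two vectors of 64-bit scaled products to one int32x4_t.
// On little-endian AArch64, vuzp1q_s32 gathers the even 32-bit lanes,
// i.e. the low half of each 64-bit lane, in a single instruction; the
// portable NEON fallback truncates each half with vmovn_s64 and rejoins
// the results with vcombine_s32.
static inline int32x4_t narrow_scaled_products(int64x2_t y01_scaled, int64x2_t y23_scaled) {
#if defined(__aarch64__)
  return vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled),
                    vreinterpretq_s32_s64(y23_scaled));
#else
  return vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
#endif
}
```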
rndna-neon.c | 111 | const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled)); | xnn_qs8_requantize_rndna__neon() | local
rndna-neon.c | 120 | const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled)); | xnn_qs8_requantize_rndna__neon() | local
fp32-scalar-fmagic.c | 45 | const float y_scaled = (float) y * scale; | xnn_qs8_requantize_fp32__scalar_fmagic() | local
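The fmagic suffix refers to the float-magic rounding trick: adding 0x1.8p+23f to a float of magnitude below 2^22 leaves its round-to-nearest-even integer value in the low mantissa bits, so no explicit rounding call is needed. A hypothetical sketch of a full scalar fmagic requantization step built around the matched line (requantize_fp32_fmagic and its constants are assumptions, not XNNPACK's exact code):

```c
#include <math.h>
#include <stdint.h>
#include <string.h>

// Clamp in the float domain, then add the magic constant so the rounded
// integer lands in the low mantissa bits; subtracting the magic's bit
// pattern (pre-adjusted by the zero point) yields the quantized value.
static inline int8_t requantize_fp32_fmagic(int32_t y, float scale, int8_t zero_point) {
  const float fmagic = 12582912.0f;                                // 0x1.8p+23f
  const int32_t imagic = INT32_C(0x4B400000) - (int32_t) zero_point;  // bits of fmagic, minus zp
  float y_scaled = (float) y * scale;
  y_scaled = fmaxf(y_scaled, (float) (-128 - (int32_t) zero_point));
  y_scaled = fminf(y_scaled, (float) (127 - (int32_t) zero_point));
  const float y_biased = y_scaled + fmagic;                        // rounds to nearest even
  int32_t bits;
  memcpy(&bits, &y_biased, sizeof bits);                           // type-pun via memcpy
  return (int8_t) (bits - imagic);
}
```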
fp32-sse4.c | 49 | const __m128 y_scaled = _mm_mul_ps(_mm_cvtepi32_ps(y), vscale); | xnn_qs8_requantize_fp32__sse4() | local
fp32-sse2.c | 49 | const __m128 y_scaled = _mm_mul_ps(_mm_cvtepi32_ps(y), vscale); | xnn_qs8_requantize_fp32__sse2() | local
fp32-scalar-lrintf.c | 43 | const float y_scaled = (float) y * scale; | xnn_qs8_requantize_fp32__scalar_lrintf() | local
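The lrintf variant scales identically but rounds through the C runtime instead of the magic constant. A minimal sketch under the same assumptions (illustrative names, not the file's exact code):

```c
#include <math.h>
#include <stdint.h>

// Scale in float, clamp so the narrowing to int8 cannot overflow, round
// with the current FP rounding mode (round-to-nearest-even by default),
// then add the output zero point.
static inline int8_t requantize_fp32_lrintf(int32_t y, float scale, int8_t zero_point) {
  const float y_scaled = (float) y * scale;
  const float y_min = (float) ((int32_t) INT8_MIN - (int32_t) zero_point);
  const float y_max = (float) ((int32_t) INT8_MAX - (int32_t) zero_point);
  const float y_clamped = fminf(fmaxf(y_scaled, y_min), y_max);
  return (int8_t) ((int32_t) lrintf(y_clamped) + (int32_t) zero_point);
}
```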
fp32-wasmsimd.c | 47 | const v128_t y_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(y), vscale); | xnn_qs8_requantize_fp32__wasmsimd() | local
rndnu-scalar.c | 63 | const int32_t y_scaled = (int32_t) asr_s64(y_product + rounding, shift); | xnn_qs8_requantize_rndnu__scalar() | local
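Here y_product is a 64-bit fixed-point product and asr_s64 is XNNPACK's arithmetic-shift-right helper; adding rounding = 2^(shift-1) before the shift gives round-to-nearest with ties up, the "rndnu" rule. A sketch, assuming 1 <= shift < 56 and an arithmetic >> on signed values (which asr_s64 guarantees portably):

```c
#include <stdint.h>

// Round-to-nearest-up fixed-point scaling: widen to 64 bits, add half of
// the divisor, then shift right. Ties land exactly on the added half and
// therefore round toward +infinity.
static inline int32_t scale_rndnu(int32_t y, int32_t multiplier, uint32_t shift) {
  const int64_t y_product = (int64_t) y * (int64_t) multiplier;
  const int64_t rounding = INT64_C(1) << (shift - 1);
  return (int32_t) ((y_product + rounding) >> shift);
}
```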
rndna-scalar-signed64.c | 69 | const int32_t y_scaled = (int32_t) asr_s64(y_adjusted_product + rounding, shift); | xnn_qs8_requantize_rndna__scalar_signed64() | local
gemmlowp-neon.c | 78 | const int32x4_t y_scaled = vrshlq_s32(y_adjusted_product, vshift); | xnn_qs8_requantize_gemmlowp__neon() | local
rndna-scalar-unsigned64.c | 76 | const int32_t y_scaled = (int32_t) (y >= 0 ? y_abs_scaled : -y_abs_scaled); | xnn_qs8_requantize_rndna__scalar_unsigned64() | local
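The rndna (round-to-nearest, ties away from zero) unsigned variants scale |y| so that the same add-then-shift rounds ties upward in magnitude, then restore the sign, which is exactly the select in the matched line. A sketch with illustrative names:

```c
#include <stdint.h>

// Ties-away-from-zero rounding via the unsigned domain: take |y|, scale
// and round it as an unsigned value (ties round up in magnitude), then
// reapply the original sign.
static inline int32_t scale_rndna(int32_t y, uint32_t multiplier, uint32_t shift) {
  const uint32_t y_abs = (y >= 0) ? (uint32_t) y : -(uint32_t) y;
  const uint64_t y_abs_product = (uint64_t) y_abs * (uint64_t) multiplier;
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  const uint32_t y_abs_scaled = (uint32_t) ((y_abs_product + rounding) >> shift);
  return (y >= 0) ? (int32_t) y_abs_scaled : -(int32_t) y_abs_scaled;
}
```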
fp32-neon.c | 57 | const float32x4_t y_scaled = vmulq_f32(vcvtq_f32_s32(y), vscale); | xnn_qs8_requantize_fp32__neon() | local
rndnu-neon-qdmulh.c | 71 | const int32x4_t y_scaled = vrshlq_s32(y_product, vpost_shift); | xnn_qs8_requantize_rndnu__neon_qdmulh() | local
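In the qdmulh variant, y_product comes from a saturating doubling high multiply (a Q31 fixed-point product) and vpost_shift is assumed to hold a negative count, so vrshlq_s32 acts as a rounding right shift. A simplified sketch of just the two matched steps (the real kernel also applies a pre-shift):

```c
#include <arm_neon.h>

// Q31 fixed-point scaling: vqdmulhq_s32 returns the high 32 bits of the
// doubled product, and vrshlq_s32 with negative lane counts performs a
// rounding arithmetic right shift by the post-shift amount.
static inline int32x4_t scale_rndnu_qdmulh(int32x4_t y, int32x4_t vmultiplier,
                                           int32x4_t vpost_shift) {
  const int32x4_t y_product = vqdmulhq_s32(y, vmultiplier);
  return vrshlq_s32(y_product, vpost_shift);
}
```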
gemmlowp-scalar.c | 104 | const int32_t y_scaled = asr_s32(y_q31product, shift) + (int32_t) (y_remainder > threshold); | xnn_qs8_requantize_gemmlowp__scalar() | local
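The remainder/threshold comparison is the tail of gemmlowp's rounding divide-by-power-of-two, which rounds ties away from zero; asr_s32 in the match is XNNPACK's arithmetic-shift helper. A sketch of that rounding rule:

```c
#include <stdint.h>

// gemmlowp-style RoundingDivideByPOT: shift right, then add 1 when the
// discarded bits exceed half the divisor. Biasing the threshold by one
// for negative inputs makes ties round away from zero.
static inline int32_t rounding_divide_by_pot(int32_t x, uint32_t shift) {
  const int32_t mask = (int32_t) ((UINT32_C(1) << shift) - 1);
  const int32_t remainder = x & mask;
  const int32_t threshold = (mask >> 1) + (int32_t) (x < 0);
  return (x >> shift) + (int32_t) (remainder > threshold);
}
```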
rndna-scalar-unsigned32.c | 104 | const int32_t y_scaled = (int32_t) (y >= 0 ? y_abs_scaled : -y_abs_scaled); | xnn_qs8_requantize_rndna__scalar_unsigned32() | local
/external/XNNPACK/src/qu8-requantization/
rndna-neon.c | 111 | const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled)); | xnn_qu8_requantize_rndna__neon() | local
rndna-neon.c | 120 | const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled)); | xnn_qu8_requantize_rndna__neon() | local
fp32-scalar-fmagic.c | 45 | const float y_scaled = (float) y * scale; | xnn_qu8_requantize_fp32__scalar_fmagic() | local
fp32-sse2.c | 49 | const __m128 y_scaled = _mm_mul_ps(_mm_cvtepi32_ps(y), vscale); | xnn_qu8_requantize_fp32__sse2() | local
fp32-scalar-lrintf.c | 43 | const float y_scaled = (float) y * scale; | xnn_qu8_requantize_fp32__scalar_lrintf() | local
fp32-wasmsimd.c | 47 | const v128_t y_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(y), vscale); | xnn_qu8_requantize_fp32__wasmsimd() | local
rndna-scalar-signed64.c | 69 | const int32_t y_scaled = (int32_t) asr_s64(y_adjusted_product + rounding, shift); | xnn_qu8_requantize_rndna__scalar_signed64() | local
gemmlowp-neon.c | 78 | const int32x4_t y_scaled = vrshlq_s32(y_adjusted_product, vshift); | xnn_qu8_requantize_gemmlowp__neon() | local
fp32-neon.c | 57 | const float32x4_t y_scaled = vmulq_f32(vcvtq_f32_s32(y), vscale); | xnn_qu8_requantize_fp32__neon() | local
rndna-scalar-unsigned64.c | 76 | const int32_t y_scaled = (int32_t) (y >= 0 ? y_abs_scaled : -y_abs_scaled); | xnn_qu8_requantize_rndna__scalar_unsigned64() | local
gemmlowp-scalar.c | 104 | const int32_t y_scaled = asr_s32(y_q31product, shift) + (int32_t) (y_remainder > threshold); | xnn_qu8_requantize_gemmlowp__scalar() | local
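The qu8 matches mirror the qs8 files above line for line; the difference lies downstream of y_scaled, where the output zero point is added and the result is clamped to the unsigned range [0, 255] rather than the signed range [-128, 127]. A hypothetical sketch of that final step (finish_qu8 is an illustrative name, not XNNPACK's):

```c
#include <stdint.h>

// Final qu8 step: re-center on the output zero point and saturate to the
// representable uint8 range before narrowing.
static inline uint8_t finish_qu8(int32_t y_scaled, uint8_t zero_point) {
  int32_t q = y_scaled + (int32_t) zero_point;
  if (q < 0) q = 0;
  if (q > 255) q = 255;
  return (uint8_t) q;
}
```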