/external/XNNPACK/src/qu8-vlrelu/gen/ |
D | vlrelu-scalar-select-x4.c | 41 …const int32_t vmultiplier3 = XNN_UNPREDICTABLE(vacc3 >= 0) ? vpositive_multiplier : vnegative_mult… in xnn_qu8_vlrelu_ukernel__scalar_select_x4() local
|
D | vlrelu-scalar-andxor-x4.c | 41 int32_t vmultiplier3 = math_asr_s32(vacc3, 31); in xnn_qu8_vlrelu_ukernel__scalar_andxor_x4() local
|
D | vlrelu-avx-x32.c | 47 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__avx_x32() local
|
D | vlrelu-sse41-x32.c | 47 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__sse41_x32() local
|
D | vlrelu-wasmrelaxedsimd-x86-x32.c | 46 v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_x86_x32() local
|
D | vlrelu-wasmsimd-x86-x32.c | 46 v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__wasmsimd_x86_x32() local
|
D | vlrelu-avx2-x64.c | 47 __m256i vmultiplier3 = _mm256_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__avx2_x64() local
|
D | vlrelu-wasmrelaxedsimd-arm-x32.c | 45 v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15); in xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_arm_x32() local
|
D | vlrelu-wasmsimd-arm-x32.c | 45 v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15); in xnn_qu8_vlrelu_ukernel__wasmsimd_arm_x32() local
|
D | vlrelu-neon-x32.c | 55 const int16x8_t vmultiplier3 = vbslq_s16(vmask3, vpositive_multiplier, vnegative_multiplier); in xnn_qu8_vlrelu_ukernel__neon_x32() local
|
D | vlrelu-ssse3-x32.c | 51 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__ssse3_x32() local
|
D | vlrelu-sse2-x32.c | 51 __m128i vmultiplier3 = _mm_cmpgt_epi16(vextx3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__sse2_x32() local
|
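The two scalar variants above compute the same per-element multiplier in different ways: the select kernel picks between the positive and negative multiplier with a conditional on the sign of the zero-point-adjusted input, while the andxor kernel turns that sign into an all-ones/all-zeros mask via an arithmetic shift (math_asr_s32) and blends the two multipliers with AND/XOR, avoiding a data-dependent branch. Below is a minimal, self-contained sketch of both patterns; the base/diff decomposition and the helper names are illustrative assumptions rather than XNNPACK's exact parameter layout, and the surrounding loop, shift, and requantization steps are omitted.

#include <stdint.h>
#include <stdio.h>

/* "select" flavor: choose the multiplier with a conditional, as in
   vlrelu-scalar-select-x4.c. */
static int32_t multiplier_select(int32_t vacc,
                                 int32_t positive_multiplier,
                                 int32_t negative_multiplier) {
  return (vacc >= 0) ? positive_multiplier : negative_multiplier;
}

/* "andxor" flavor: an arithmetic shift by 31 (standing in for XNNPACK's
   math_asr_s32 helper) yields 0 for non-negative inputs and -1 (all ones)
   for negative ones; AND/XOR then blend the two multipliers branch-free,
   as in vlrelu-scalar-andxor-x4.c. Here the base is assumed to be the
   positive multiplier and the diff the XOR of the two multipliers. */
static int32_t multiplier_andxor(int32_t vacc,
                                 int32_t multiplier_base,
                                 int32_t multiplier_diff) {
  const int32_t vmask = vacc >> 31;  /* 0 or -1 */
  return multiplier_base ^ (vmask & multiplier_diff);
}

int main(void) {
  const int32_t pos = 0x4000, neg = 0x0CCC;  /* arbitrary example multipliers */
  for (int32_t vacc = -2; vacc <= 2; vacc++) {
    printf("vacc=%2d select=%#x andxor=%#x\n", (int) vacc,
           (unsigned) multiplier_select(vacc, pos, neg),
           (unsigned) multiplier_andxor(vacc, pos, pos ^ neg));
  }
  return 0;
}

Both forms return the same multiplier for every input; the andxor form trades a branch for two extra ALU operations, which is presumably why both scalar variants exist.
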
/external/XNNPACK/src/qs8-vlrelu/gen/ |
D | vlrelu-scalar-andxor-x4.c | 41 int32_t vmultiplier3 = math_asr_s32(vacc3, 31); in xnn_qs8_vlrelu_ukernel__scalar_andxor_x4() local
|
D | vlrelu-scalar-select-x4.c | 41 …const int32_t vmultiplier3 = XNN_UNPREDICTABLE(vacc3 >= 0) ? vpositive_multiplier : vnegative_mult… in xnn_qs8_vlrelu_ukernel__scalar_select_x4() local
|
D | vlrelu-wasmsimd-x86-x32.c | 46 v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__wasmsimd_x86_x32() local
|
D | vlrelu-avx-x32.c | 47 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__avx_x32() local
|
D | vlrelu-wasmrelaxedsimd-x86-x32.c | 46 v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_x86_x32() local
|
D | vlrelu-sse41-x32.c | 47 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__sse41_x32() local
|
D | vlrelu-avx2-x64.c | 47 __m256i vmultiplier3 = _mm256_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__avx2_x64() local
|
D | vlrelu-wasmrelaxedsimd-arm-x32.c | 45 v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15); in xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_arm_x32() local
|
D | vlrelu-wasmsimd-arm-x32.c | 45 v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15); in xnn_qs8_vlrelu_ukernel__wasmsimd_arm_x32() local
|
D | vlrelu-neon-x32.c | 55 const int16x8_t vmultiplier3 = vbslq_s16(vmask3, vpositive_multiplier, vnegative_multiplier); in xnn_qs8_vlrelu_ukernel__neon_x32() local
|
D | vlrelu-ssse3-x32.c | 52 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__ssse3_x32() local
|
D | vlrelu-sse2-x32.c | 53 __m128i vmultiplier3 = _mm_cmpgt_epi16(vextx3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__sse2_x32() local
|
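The vector kernels in both listings apply the same selection idea eight or sixteen lanes at a time; only the mask construction differs per target: the x86-oriented SSE/AVX/WASM variants compare the widened input against the input zero point (_mm_cmpgt_epi16, wasm_i16x8_gt), the ARM-oriented WASM variants take an arithmetic shift by 15 (wasm_i16x8_shr), and NEON bit-selects between the two multipliers with vbslq_s16. A hedged SSE2 sketch of the compare-then-blend step follows; the base/diff parameterization is an assumption made for brevity, not necessarily the layout of XNNPACK's params struct.

#include <emmintrin.h>  /* SSE2 */

/* Per-lane multiplier selection for eight 16-bit lanes: the compare produces
   an all-ones/all-zeros mask per lane, and AND/XOR blend the two multipliers,
   mirroring the scalar andxor trick in vector form.
   vmultiplier_base is assumed to hold the multiplier for lanes at or below
   the zero point, and vmultiplier_diff its XOR with the other multiplier. */
static __m128i lrelu_multiplier_sse2(__m128i vacc,
                                     __m128i vinput_zero_point,
                                     __m128i vmultiplier_base,
                                     __m128i vmultiplier_diff) {
  const __m128i vmask = _mm_cmpgt_epi16(vacc, vinput_zero_point);  /* 0xFFFF where vacc > zero point */
  return _mm_xor_si128(vmultiplier_base,
                       _mm_and_si128(vmask, vmultiplier_diff));
}

The SSE4.1/AVX variants could equally blend with _mm_blendv_epi8 on the same mask; the AND/XOR form keeps the SSE2/SSSE3 paths free of blend instructions.
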
/external/XNNPACK/src/amalgam/ |
D | ssse3.c | 381 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qs8_vlrelu_ukernel__ssse3_x32() local
 |
D | ssse3.c | 630 __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point); in xnn_qu8_vlrelu_ukernel__ssse3_x32() local
|
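For the neon-x32 entries in both gen/ listings above, the selection is a bit-select rather than AND/XOR: a per-lane mask chooses between vpositive_multiplier and vnegative_multiplier via vbslq_s16. A minimal sketch, assuming the mask is simply the sign of the zero-point-adjusted input (the generated kernels compute their mask upstream):

#include <arm_neon.h>

/* NEON flavor of the multiplier selection: lanes with vacc >= 0 take the
   positive multiplier, the rest take the negative one. Building the mask
   with vcgeq_s16 here is for illustration only. */
static int16x8_t lrelu_multiplier_neon(int16x8_t vacc,
                                       int16x8_t vpositive_multiplier,
                                       int16x8_t vnegative_multiplier) {
  const uint16x8_t vmask = vcgeq_s16(vacc, vdupq_n_s16(0));
  return vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
}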