/external/XNNPACK/src/qs8-vcvt/gen/ |
D | vcvt-armsimd32-x8.c | in xnn_qs8_vcvt_ukernel__armsimd32_x8():
      48  vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
      49  vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
      50  vacc2 = __ssat(math_asr_s32(vacc2, 1), 8);
      51  vacc3 = __ssat(math_asr_s32(vacc3, 1), 8);
      52  vacc4 = __ssat(math_asr_s32(vacc4, 1), 8);
      53  vacc5 = __ssat(math_asr_s32(vacc5, 1), 8);
      54  vacc6 = __ssat(math_asr_s32(vacc6, 1), 8);
      55  vacc7 = __ssat(math_asr_s32(vacc7, 1), 8);
      79  vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
      80  vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
      [all …]
|
D | vcvt-armsimd32-x4.c | in xnn_qs8_vcvt_ukernel__armsimd32_x4():
      41  vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
      42  vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
      43  vacc2 = __ssat(math_asr_s32(vacc2, 1), 8);
      44  vacc3 = __ssat(math_asr_s32(vacc3, 1), 8);
      62  vacc0 = __ssat(math_asr_s32(vacc0, 1), 8);
      63  vacc1 = __ssat(math_asr_s32(vacc1, 1), 8);
      68  vacc0 = __ssat(math_asr_s32(vacc2, 1), 8);
|
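Note: every match in the two convert kernels above is the same final step — arithmetic-shift the 32-bit accumulator right by one, then saturate to the signed 8-bit range. A minimal C model of just that step, assuming only that math_asr_s32 is an arithmetic shift right and that __ssat(x, 8) clamps to [-128, 127]; the helper name asr1_ssat8 is illustrative, not part of XNNPACK:

    #include <stdint.h>

    /* Model of `__ssat(math_asr_s32(vacc, 1), 8)`. */
    static inline int32_t asr1_ssat8(int32_t vacc) {
      /* Arithmetic shift right by 1, written to sidestep the
         implementation-defined `>>` on negative operands. */
      const int32_t shifted = vacc >= 0 ? (vacc >> 1) : ~(~vacc >> 1);
      /* Saturate to the int8 range [-128, 127]. */
      return shifted < INT8_MIN ? INT8_MIN
           : shifted > INT8_MAX ? INT8_MAX : shifted;
    }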
/external/XNNPACK/src/qs8-vlrelu/gen/ |
D | vlrelu-armsimd32-x8.c | in xnn_qs8_vlrelu_ukernel__armsimd32_x8():
      58  vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
      59  vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
      60  vacc2 = __ssat(math_asr_s32(vacc2, 8), 8);
      61  vacc3 = __ssat(math_asr_s32(vacc3, 8), 8);
      62  vacc4 = __ssat(math_asr_s32(vacc4, 8), 8);
      63  vacc5 = __ssat(math_asr_s32(vacc5, 8), 8);
      64  vacc6 = __ssat(math_asr_s32(vacc6, 8), 8);
      65  vacc7 = __ssat(math_asr_s32(vacc7, 8), 8);
      94  vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
      95  vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
      [all …]
|
D | vlrelu-armsimd32-x4.c | in xnn_qs8_vlrelu_ukernel__armsimd32_x4():
      47  vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
      48  vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
      49  vacc2 = __ssat(math_asr_s32(vacc2, 8), 8);
      50  vacc3 = __ssat(math_asr_s32(vacc3, 8), 8);
      73  vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
      74  vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
      79  vacc0 = __ssat(math_asr_s32(vacc2, 8), 8);
|
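The leaky-ReLU kernels above end the same way but shift by 8 instead of 1 (the asr1_ssat8 model applies with the shift count changed), which would be consistent with a slope multiplier carrying 8 fractional bits — an inference from the shift amount, not something the matches themselves show.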
/external/arm-neon-tests/ |
D | ref_integer.c | in exec_integer():
      219  sres = __ssat(svar1, 30);
      225  sres = __ssat(svar1, 19);
      231  sres = __ssat(svar1, 29);
      237  sres = __ssat(svar1, 12);
      243  sres = __ssat(svar1, 32);
      249  sres = __ssat(svar1, 1);
|
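The arm-neon-tests matches exercise __ssat across its legal width operands: per ACLE, the saturating bit-width must be a compile-time constant in [1, 32], where width 32 leaves any 32-bit value unchanged and width 1 admits only -1 and 0.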
/external/XNNPACK/src/qs8-gemm/gen/ |
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      114  vout0x0 = __ssat(vout0x0, 8);
      115  vout0x1 = __ssat(vout0x1, 8);
      116  vout1x0 = __ssat(vout1x0, 8);
      117  vout1x1 = __ssat(vout1x1, 8);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      89  vout0x0 = __ssat(vout0x0, 8);
      90  vout0x1 = __ssat(vout0x1, 8);
|
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      93  vout0x0 = __ssat(vout0x0, 8);
      94  vout1x0 = __ssat(vout1x0, 8);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      76  vout0x0 = __ssat(vout0x0, 8);
|
/external/XNNPACK/src/qc8-gemm/gen/ |
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      116  vout0x0 = __ssat(vout0x0, 8);
      117  vout0x1 = __ssat(vout0x1, 8);
      118  vout1x0 = __ssat(vout1x0, 8);
      119  vout1x1 = __ssat(vout1x1, 8);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      91  vout0x0 = __ssat(vout0x0, 8);
      92  vout0x1 = __ssat(vout0x1, 8);
|
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      94  vout0x0 = __ssat(vout0x0, 8);
      95  vout1x0 = __ssat(vout1x0, 8);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      77  vout0x0 = __ssat(vout0x0, 8);
|
/external/XNNPACK/src/qs8-igemm/gen/ |
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      134  vout0x0 = __ssat(vout0x0, 8);
      135  vout0x1 = __ssat(vout0x1, 8);
      136  vout1x0 = __ssat(vout1x0, 8);
      137  vout1x1 = __ssat(vout1x1, 8);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      106  vout0x0 = __ssat(vout0x0, 8);
      107  vout0x1 = __ssat(vout0x1, 8);
|
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      113  vout0x0 = __ssat(vout0x0, 8);
      114  vout1x0 = __ssat(vout1x0, 8);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      93  vout0x0 = __ssat(vout0x0, 8);
|
/external/XNNPACK/src/qc8-igemm/gen/ |
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      136  vout0x0 = __ssat(vout0x0, 8);
      137  vout0x1 = __ssat(vout0x1, 8);
      138  vout1x0 = __ssat(vout1x0, 8);
      139  vout1x1 = __ssat(vout1x1, 8);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      108  vout0x0 = __ssat(vout0x0, 8);
      109  vout0x1 = __ssat(vout0x1, 8);
|
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      114  vout0x0 = __ssat(vout0x0, 8);
      115  vout1x0 = __ssat(vout1x0, 8);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      94  vout0x0 = __ssat(vout0x0, 8);
|
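In all sixteen *-minmax-fp32-armsimd32 GEMM/IGEMM variants above, the match is the output clamp of fp32 requantization: by this point vout already holds the scaled, zero-point-adjusted 32-bit result (an inference from the kernel names; the scaling itself is not in the matches), and __ssat(vout, 8) pins it to [-128, 127] so the narrowing store to int8 cannot wrap.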
/external/clang/test/Sema/ |
D | arm_acle.c | in test_ssat_const_diag():
      27  return __ssat(t, v); // expected-error-re {{argument to {{.*}} must be a constant integer}}
|
/external/clang/test/CodeGen/ |
D | arm_acle.c | in test_ssat():
      275  return __ssat(t, 1);
|
/external/XNNPACK/src/qs8-vcvt/ |
D | armsimd32.c.in |
      24  $__XSAT = {"QS8": "__ssat", "QU8": "__usat"}[DATATYPE]
|
/external/clang/lib/Headers/ |
D | arm_acle.h | macro:
      235  #define __ssat(x, y) __builtin_arm_ssat(x, y)
|
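The header definition above maps __ssat onto the __builtin_arm_ssat builtin. For reference, a C model of the ACLE semantics: __ssat(x, n) saturates x to the range of an n-bit two's-complement integer. The real intrinsic requires n to be a compile-time constant in [1, 32] — exactly what the Sema test above diagnoses — so this runtime model (the name ssat_model is illustrative) exists only to pin down the arithmetic:

    #include <stdint.h>

    /* Reference model of ACLE __ssat(x, n): clamp x to
       [-(1 << (n-1)), (1 << (n-1)) - 1], for n in [1, 32]. */
    static inline int32_t ssat_model(int32_t x, unsigned n) {
      const int32_t hi = (int32_t) ((1u << (n - 1)) - 1u);  /* n == 32 -> INT32_MAX */
      const int32_t lo = -hi - 1;                           /* n == 32 -> INT32_MIN */
      return x < lo ? lo : x > hi ? hi : x;
    }

For example, ssat_model(200, 8) == 127, and with n == 1 the only representable values are -1 and 0, matching the width-1 call probed in ref_integer.c above.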