
Searched refs:s12 (Results 1 – 25 of 146) sorted by relevance


/external/capstone/suite/MC/AArch64/
neon-scalar-cvt.s.cs:10 0x95,0xfd,0x3f,0x5f = fcvtzs s21, s12, #1
12 0x95,0xfd,0x3f,0x7f = fcvtzu s21, s12, #1
15 0xac,0xc9,0x21,0x5e = fcvtas s12, s13
17 0xac,0xc9,0x21,0x7e = fcvtau s12, s13
21 0xac,0xb9,0x21,0x7e = fcvtmu s12, s13
25 0xac,0xa9,0x21,0x7e = fcvtnu s12, s13
29 0xac,0xa9,0xa1,0x7e = fcvtpu s12, s13
31 0xac,0xb9,0xa1,0x5e = fcvtzs s12, s13
33 0xac,0xb9,0xa1,0x7e = fcvtzu s12, s13
neon-scalar-fp-compare.s.cs:2 0x6a,0xe5,0x2c,0x5e = fcmeq s10, s11, s12
6 0x6a,0xe5,0x2c,0x7e = fcmge s10, s11, s12
10 0x6a,0xe5,0xac,0x7e = fcmgt s10, s11, s12
18 0x6a,0xed,0x2c,0x7e = facge s10, s11, s12
20 0x6a,0xed,0xac,0x7e = facgt s10, s11, s12
neon-scalar-mul.s.cs:9 0x13,0x93,0xac,0x5e = sqdmlal d19, s24, s12
12 0xcc,0xd2,0x6c,0x5e = sqdmull s12, h22, h12
13 0xcf,0xd2,0xac,0x5e = sqdmull d15, s22, s12
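These Capstone .cs test vectors pair raw little-endian instruction bytes with the disassembly they must produce. As a minimal sketch (assuming the Capstone library and headers are installed; build with -lcapstone), the first hit's bytes can be fed through the regular C API:

```c
#include <stdio.h>
#include <capstone/capstone.h>

int main(void) {
  /* 0x95,0xfd,0x3f,0x5f should decode to "fcvtzs s21, s12, #1" */
  const uint8_t code[] = {0x95, 0xfd, 0x3f, 0x5f};
  csh handle;
  cs_insn *insn;

  if (cs_open(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN, &handle) != CS_ERR_OK)
    return 1;
  size_t count = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
  for (size_t i = 0; i < count; i++)
    printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);
  cs_free(insn, count);
  cs_close(&handle);
  return 0;
}
```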
/external/llvm/test/MC/ARM/
vpush-vpop.s:7 vpush {s8, s9, s10, s11, s12}
9 vpop {s8, s9, s10, s11, s12}
12 vpush.16 {s8, s9, s10, s11, s12}
14 vpop.64 {s8, s9, s10, s11, s12}
17 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
19 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
22 @ CHECK-ARM: vpush {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0x2d,0xed]
24 @ CHECK-ARM: vpop {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0xbd,0xec]
27 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
29 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
[all …]
fullfp16.s:92 vcvt.u32.f16 s12, s12, #20
100 @ ARM: vcvt.u32.f16 s12, s12, #20 @ encoding: [0xc6,0x69,0xbf,0xee]
108 @ THUMB: vcvt.u32.f16 s12, s12, #20 @ encoding: [0xbf,0xee,0xc6,0x69]
161 vmaxnm.f16 s5, s12, s0
162 @ ARM: vmaxnm.f16 s5, s12, s0 @ encoding: [0x00,0x29,0xc6,0xfe]
163 @ THUMB: vmaxnm.f16 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x29]
165 vminnm.f16 s0, s0, s12
166 @ ARM: vminnm.f16 s0, s0, s12 @ encoding: [0x46,0x09,0x80,0xfe]
167 @ THUMB: vminnm.f16 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x09]
181 vrinta.f16 s12, s1
[all …]
thumb-fp-armv8.s:86 vmaxnm.f32 s5, s12, s0
87 @ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x2a]
90 vminnm.f32 s0, s0, s12
91 @ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x0a]
117 vrinta.f32 s12, s1
118 @ CHECK: vrinta.f32 s12, s1 @ encoding: [0xb8,0xfe,0x60,0x6a]
121 vrintn.f32 s12, s1
122 @ CHECK: vrintn.f32 s12, s1 @ encoding: [0xb9,0xfe,0x60,0x6a]
125 vrintp.f32 s12, s1
126 @ CHECK: vrintp.f32 s12, s1 @ encoding: [0xba,0xfe,0x60,0x6a]
[all …]
fp-armv8.s:83 vmaxnm.f32 s5, s12, s0
84 @ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0x00,0x2a,0xc6,0xfe]
87 vminnm.f32 s0, s0, s12
88 @ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x46,0x0a,0x80,0xfe]
111 vrinta.f32 s12, s1
112 @ CHECK: vrinta.f32 s12, s1 @ encoding: [0x60,0x6a,0xb8,0xfe]
115 vrintn.f32 s12, s1
116 @ CHECK: vrintn.f32 s12, s1 @ encoding: [0x60,0x6a,0xb9,0xfe]
119 vrintp.f32 s12, s1
120 @ CHECK: vrintp.f32 s12, s1 @ encoding: [0x60,0x6a,0xba,0xfe]
[all …]
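The vmaxnm/vminnm/vrint hits above check the same FP instructions in both Thumb and ARM mode, and the expected bytes differ only by a halfword swap: Thumb-2 stores a 32-bit instruction as two little-endian halfwords, while ARM mode stores one little-endian word (compare [0xc6,0xfe,0x00,0x2a] in thumb-fp-armv8.s with [0x00,0x2a,0xc6,0xfe] in fp-armv8.s). A small illustrative C helper (hypothetical, not part of the test suite):

```c
#include <stdint.h>

/* Reinterpret a 4-byte Thumb-2 encoding (two little-endian halfwords)
 * as the equivalent ARM-mode little-endian word, e.g.
 * {0xc6,0xfe,0x00,0x2a} -> {0x00,0x2a,0xc6,0xfe}. */
static void thumb2_to_arm_bytes(const uint8_t t[4], uint8_t a[4]) {
  a[0] = t[2]; a[1] = t[3];  /* low halfword of the 32-bit word */
  a[2] = t[0]; a[3] = t[1];  /* high halfword of the 32-bit word */
}
```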
/external/llvm/test/MC/AArch64/
neon-scalar-cvt.s:58 fcvtzs s21, s12, #1
70 fcvtzu s21, s12, #1
92 fcvtas s12, s13
105 fcvtau s12, s13
131 fcvtmu s12, s13
157 fcvtnu s12, s13
183 fcvtpu s12, s13
195 fcvtzs s12, s13
208 fcvtzu s12, s13
neon-scalar-fp-compare.s:10 fcmeq s10, s11, s12
40 fcmge s10, s11, s12
70 fcmgt s10, s11, s12
136 facge s10, s11, s12
148 facgt s10, s11, s12
neon-scalar-mul.s:42 sqdmlal d19, s24, s12
61 sqdmull s12, h22, h12
62 sqdmull d15, s22, s12
/external/rust/crates/ring/crypto/curve25519/
curve25519.c:968 int64_t s12 = 2097151 & (load_4(s + 31) >> 4); in x25519_sc_reduce() local
999 s12 += s23 * 470296; in x25519_sc_reduce()
1008 s12 += s22 * 654183; in x25519_sc_reduce()
1017 s12 -= s21 * 997805; in x25519_sc_reduce()
1026 s12 += s20 * 136657; in x25519_sc_reduce()
1035 s12 -= s19 * 683901; in x25519_sc_reduce()
1055 carry12 = (s12 + (1 << 20)) >> 21; in x25519_sc_reduce()
1057 s12 -= int64_lshift21(carry12); in x25519_sc_reduce()
1072 s12 += carry11; in x25519_sc_reduce()
1121 s0 += s12 * 666643; in x25519_sc_reduce()
[all …]
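x25519_sc_reduce reduces a 512-bit scalar modulo the group order L = 2^252 + 27742317777372353535851937790883648493, with the input split into signed 21-bit limbs (2097151 = 2^21 - 1). Here s12 is limb 12, covering bits 252..272, and the constants 666643, 470296, 654183, 997805, 136657, 683901 come from the signed 21-bit limb representation of the order used to fold high limbs back down (2^252 ≡ -27742... mod L). The same ref10-derived function recurs in the three BoringSSL copies below. A minimal sketch of the limb-extraction and rounding-carry pattern seen in these hits (helper names mirror the source; this is not the full reduction):

```c
#include <stdint.h>

/* Little-endian load of 4 bytes, as in curve25519.c. */
static uint64_t load_4(const uint8_t *in) {
  return (uint64_t)in[0] | ((uint64_t)in[1] << 8) |
         ((uint64_t)in[2] << 16) | ((uint64_t)in[3] << 24);
}

/* Limb 12 of the 512-bit scalar: bits 252..272, i.e. start at
 * byte 31, skip 4 bits, mask to 21 bits. */
static int64_t limb12(const uint8_t s[64]) {
  return 2097151 & (int64_t)(load_4(s + 31) >> 4);
}

/* One rounding carry step, matching
 *   carry12 = (s12 + (1 << 20)) >> 21;
 *   s13 += carry12; s12 -= int64_lshift21(carry12);
 * after which *lo lies in [-2^20, 2^20). Multiplication is used
 * instead of << to avoid shifting a negative value, which is the
 * same reason the source wraps the shift in int64_lshift21(). */
static void carry_step(int64_t *lo, int64_t *hi) {
  const int64_t carry = (*lo + ((int64_t)1 << 20)) >> 21;
  *hi += carry;
  *lo -= carry * ((int64_t)1 << 21);
}
```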
/external/rust/crates/quiche/deps/boringssl/src/crypto/curve25519/
curve25519.c:1070 int64_t s12 = 2097151 & (load_4(s + 31) >> 4); in x25519_sc_reduce() local
1101 s12 += s23 * 470296; in x25519_sc_reduce()
1110 s12 += s22 * 654183; in x25519_sc_reduce()
1119 s12 -= s21 * 997805; in x25519_sc_reduce()
1128 s12 += s20 * 136657; in x25519_sc_reduce()
1137 s12 -= s19 * 683901; in x25519_sc_reduce()
1157 carry12 = (s12 + (1 << 20)) >> 21; in x25519_sc_reduce()
1159 s12 -= int64_lshift21(carry12); in x25519_sc_reduce()
1174 s12 += carry11; in x25519_sc_reduce()
1223 s0 += s12 * 666643; in x25519_sc_reduce()
[all …]
/external/boringssl/src/crypto/curve25519/
curve25519.c:1063 int64_t s12 = 2097151 & (load_4(s + 31) >> 4); in x25519_sc_reduce() local
1094 s12 += s23 * 470296; in x25519_sc_reduce()
1103 s12 += s22 * 654183; in x25519_sc_reduce()
1112 s12 -= s21 * 997805; in x25519_sc_reduce()
1121 s12 += s20 * 136657; in x25519_sc_reduce()
1130 s12 -= s19 * 683901; in x25519_sc_reduce()
1150 carry12 = (s12 + (1 << 20)) >> 21; in x25519_sc_reduce()
1152 s12 -= int64_lshift21(carry12); in x25519_sc_reduce()
1167 s12 += carry11; in x25519_sc_reduce()
1216 s0 += s12 * 666643; in x25519_sc_reduce()
[all …]
/external/cronet/third_party/boringssl/src/crypto/curve25519/
curve25519.c:1063 int64_t s12 = 2097151 & (load_4(s + 31) >> 4); in x25519_sc_reduce() local
1094 s12 += s23 * 470296; in x25519_sc_reduce()
1103 s12 += s22 * 654183; in x25519_sc_reduce()
1112 s12 -= s21 * 997805; in x25519_sc_reduce()
1121 s12 += s20 * 136657; in x25519_sc_reduce()
1130 s12 -= s19 * 683901; in x25519_sc_reduce()
1150 carry12 = (s12 + (1 << 20)) >> 21; in x25519_sc_reduce()
1152 s12 -= int64_lshift21(carry12); in x25519_sc_reduce()
1167 s12 += carry11; in x25519_sc_reduce()
1216 s0 += s12 * 666643; in x25519_sc_reduce()
[all …]
/external/aac/libArithCoding/src/
ac_arith_coder.cpp:578 ULONG s12 = (fMax((UINT)s, (UINT)1) << 12) - 1; in get_pk_v2() local
579 if (s12 > p[485]) { in get_pk_v2()
582 if (s12 > p[255]) p += 256; in get_pk_v2()
585 if (s12 > p[127]) { in get_pk_v2()
588 if (s12 > p[63]) { in get_pk_v2()
591 if (s12 > p[31]) { in get_pk_v2()
594 if (s12 > p[15]) { in get_pk_v2()
597 if (s12 > p[7]) { in get_pk_v2()
600 if (s12 > p[3]) { in get_pk_v2()
603 if (s12 > p[1]) { in get_pk_v2()
[all …]
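get_pk_v2 builds a 12-bit search key (s12 = (max(s, 1) << 12) - 1) and resolves it against the cumulative probability table p[] with a fully unrolled binary search whose stride halves from 256 down to 1, advancing the window pointer at each step. A hedged scalar sketch of the same stride-halving search over a sorted table (table size and element type are illustrative, not the actual tables from ac_arith_coder.cpp):

```c
#include <stddef.h>
#include <stdint.h>

/* Unrolled binary search over a sorted 512-entry table: returns the
 * index of the first entry >= key (or the last index if key exceeds
 * every entry). Each comparison advances the window pointer by a
 * halving power-of-two stride, as in get_pk_v2. */
static size_t first_ge_512(const uint32_t p[512], uint32_t key) {
  const uint32_t *base = p;
  if (key > p[255]) p += 256;
  if (key > p[127]) p += 128;
  if (key > p[63])  p += 64;
  if (key > p[31])  p += 32;
  if (key > p[15])  p += 16;
  if (key > p[7])   p += 8;
  if (key > p[3])   p += 4;
  if (key > p[1])   p += 2;
  if (key > p[0])   p += 1;
  return (size_t)(p - base);
}
```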
/external/capstone/suite/MC/ARM/
thumb-fp-armv8.s.cs:34 0xc6,0xfe,0x00,0x2a = vmaxnm.f32 s5, s12, s0
36 0x80,0xfe,0x46,0x0a = vminnm.f32 s0, s0, s12
45 0xb8,0xfe,0x60,0x6a = vrinta.f32 s12, s1
47 0xb9,0xfe,0x60,0x6a = vrintn.f32 s12, s1
49 0xba,0xfe,0x60,0x6a = vrintp.f32 s12, s1
51 0xbb,0xfe,0x60,0x6a = vrintm.f32 s12, s1
vpush-vpop.s.cs:3 0x05,0x4a,0x2d,0xed = vpush {s8, s9, s10, s11, s12}
5 0x05,0x4a,0xbd,0xec = vpop {s8, s9, s10, s11, s12}
7 0x05,0x4a,0x2d,0xed = vpush {s8, s9, s10, s11, s12}
9 0x05,0x4a,0xbd,0xec = vpop {s8, s9, s10, s11, s12}
vpush-vpop-thumb.s.cs:3 0x2d,0xed,0x05,0x4a = vpush {s8, s9, s10, s11, s12}
5 0xbd,0xec,0x05,0x4a = vpop {s8, s9, s10, s11, s12}
7 0x2d,0xed,0x05,0x4a = vpush {s8, s9, s10, s11, s12}
9 0xbd,0xec,0x05,0x4a = vpop {s8, s9, s10, s11, s12}
fp-armv8.s.cs:34 0x00,0x2a,0xc6,0xfe = vmaxnm.f32 s5, s12, s0
36 0x46,0x0a,0x80,0xfe = vminnm.f32 s0, s0, s12
45 0x60,0x6a,0xb8,0xfe = vrinta.f32 s12, s1
47 0x60,0x6a,0xb9,0xfe = vrintn.f32 s12, s1
49 0x60,0x6a,0xba,0xfe = vrintp.f32 s12, s1
51 0x60,0x6a,0xbb,0xfe = vrintm.f32 s12, s1
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
add.h:115 int32x4_t s12 = vaddq_s32(x112, x212); in AddElementwiseInt8() local
119 s12 = vqrdmulhq_n_s32(s12, params.output_multiplier); in AddElementwiseInt8()
124 s12 = RoundingDivideByPOT(s12, -params.output_shift); in AddElementwiseInt8()
128 const int16x4_t s12_narrowed = vmovn_s32(s12); in AddElementwiseInt8()
197 __m256i s12 = in AddElementwiseInt16() local
205 s12 = _mm256_add_epi32(s12, input1_offset); in AddElementwiseInt16()
211 s12 = avx2_utils::MultiplyByQuantizedMultiplier( in AddElementwiseInt16()
212 s12, params.input1_multiplier, input1_left_shift); in AddElementwiseInt16()
219 __m256i s2 = _mm256_add_epi32(s12, s22); in AddElementwiseInt16()
297 int32x4_t s12 = vaddq_s32(x112, x122); in AddElementwiseInt16() local
[all …]
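In AddElementwiseInt8, s12 is one int32x4_t vector of widened sums: the int8 inputs are widened, added with vaddq_s32, scaled by the output multiplier with a saturating rounding doubling high multiply (vqrdmulhq_n_s32), rounding-shifted by the output shift, and narrowed with vmovn_s32. A hedged NEON sketch of that requantization step (ARM targets only; note vrshlq_s32 rounds ties upward, while the reference RoundingDivideByPOT rounds ties away from zero, so negative ties can differ by one):

```c
#include <arm_neon.h>

/* Approximate RoundingDivideByPOT: a rounding arithmetic shift
 * right by `exponent`, done as vrshlq_s32 with a negative shift. */
static int32x4_t rounding_div_by_pot(int32x4_t x, int exponent) {
  return vrshlq_s32(x, vdupq_n_s32(-exponent));
}

/* Requantize four widened sums: high half of (2 * sum * multiplier),
 * then a rounding shift right, then narrow to 16 bits (the final
 * clamp and narrowing to int8 are omitted here). */
static int16x4_t requantize_s32x4(int32x4_t sum, int32_t multiplier,
                                  int exponent) {
  int32x4_t s = vqrdmulhq_n_s32(sum, multiplier);
  s = rounding_div_by_pot(s, exponent);
  return vmovn_s32(s);
}
```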
/external/libvpx/vp9/common/x86/
vp9_highbd_iht16x16_add_sse4.c:58 s10[2], s11[2], s12[2], s13[2], s14[2], s15[2]; in highbd_iadst16_4col_sse4_1() local
70 highbd_iadst_butterfly_sse4_1(io[3], io[12], cospi_25_64, cospi_7_64, s12, in highbd_iadst16_4col_sse4_1()
83 x4[0] = _mm_add_epi64(s4[0], s12[0]); in highbd_iadst16_4col_sse4_1()
84 x4[1] = _mm_add_epi64(s4[1], s12[1]); in highbd_iadst16_4col_sse4_1()
99 x12[0] = _mm_sub_epi64(s4[0], s12[0]); in highbd_iadst16_4col_sse4_1()
100 x12[1] = _mm_sub_epi64(s4[1], s12[1]); in highbd_iadst16_4col_sse4_1()
179 s12); in highbd_iadst16_4col_sse4_1()
183 x8[0] = _mm_add_epi64(s8[0], s12[0]); in highbd_iadst16_4col_sse4_1()
184 x8[1] = _mm_add_epi64(s8[1], s12[1]); in highbd_iadst16_4col_sse4_1()
191 x12[0] = _mm_sub_epi64(s8[0], s12[0]); in highbd_iadst16_4col_sse4_1()
[all …]
/external/libvpx/vp9/common/arm/neon/
vp9_iht16x16_add_neon.c:40 int32x4_t s8[2], s9[2], s10[2], s11[2], s12[2], s13[2], s14[2], s15[2]; in vpx_iadst16x16_256_add_half1d() local
141 iadst_butterfly_lane_0_1_neon(x[12], x[13], c_25_7_29_3, s12, s13); in vpx_iadst16x16_256_add_half1d()
148 x[4] = add_dct_const_round_shift_low_8(s4, s12); in vpx_iadst16x16_256_add_half1d()
156 x[12] = sub_dct_const_round_shift_low_8(s4, s12); in vpx_iadst16x16_256_add_half1d()
172 iadst_butterfly_lane_1_0_neon(x[13], x[12], c_4_28_20_12, s13, s12); in vpx_iadst16x16_256_add_half1d()
183 x[8] = add_dct_const_round_shift_low_8(s8, s12); in vpx_iadst16x16_256_add_half1d()
187 x[12] = sub_dct_const_round_shift_low_8(s8, s12); in vpx_iadst16x16_256_add_half1d()
203 iadst_butterfly_lane_2_3_neon(x[12], x[13], c_16_n16_8_24, s12, s13); in vpx_iadst16x16_256_add_half1d()
218 x[12] = add_dct_const_round_shift_low_8(s12, s14); in vpx_iadst16x16_256_add_half1d()
220 x[14] = sub_dct_const_round_shift_low_8(s12, s14); in vpx_iadst16x16_256_add_half1d()
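In both the SSE4 and NEON versions, each s-array pair (s12[2] and friends) holds the 64-bit products of an iadst butterfly, s0 = x0*c0 + x1*c1 and s1 = x0*c1 - x1*c0, with cospi constants scaled by 2^14; the add/sub_dct_const_round_shift helpers then combine two such products and round the 2^14 scale back out, e.g. x[4] = round_shift(s4 + s12) in the hits above. A scalar model of that arithmetic (function names are illustrative; the constants and rounding follow libvpx's DCT_CONST_BITS = 14):

```c
#include <stdint.h>

#define DCT_CONST_BITS 14 /* cospi_N_64 = round(2^14 * cos(N*pi/64)) */

/* One iadst butterfly: 32x32 -> 64-bit cross products. */
static void iadst_butterfly(int32_t x0, int32_t x1, int32_t c0, int32_t c1,
                            int64_t *s0, int64_t *s1) {
  *s0 = (int64_t)x0 * c0 + (int64_t)x1 * c1;
  *s1 = (int64_t)x0 * c1 - (int64_t)x1 * c0;
}

/* dct_const_round_shift over the sum of two butterfly outputs. */
static int32_t add_dct_const_round_shift(int64_t a, int64_t b) {
  return (int32_t)((a + b + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}
```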
/external/XNNPACK/src/f32-gemm/
4x4-minmax-aarch32-vfp-ld64.S:100 VMLA.F32 s16, s12, s0
102 VMLA.F32 s20, s12, s2
104 VMLA.F32 s24, s12, s4
106 VMLA.F32 s28, s12, s6
130 VMLA.F32 s18, s12, s1
132 VMLA.F32 s22, s12, s3
134 VMLA.F32 s26, s12, s5
136 VMLA.F32 s30, s12, s7
274 VMLA.F32 s16, s12, s0
279 VMLA.F32 s20, s12, s1
[all …]
4x4-aarch32-vfp-ld64.S:114 VMLA.F32 s16, s12, s1
116 VMLA.F32 s20, s12, s3
118 VMLA.F32 s24, s12, s5
120 VMLA.F32 s28, s12, s7
172 VMLA.F32 s16, s12, s0
177 VMLA.F32 s20, s12, s1
182 VMLA.F32 s24, s12, s2
187 VMLA.F32 s28, s12, s3
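In these f32 GEMM micro-kernels, s12 carries a single panel element that each group of four VMLA.F32 instructions reuses against four different operands, with the 4x4 accumulator tile living in s16..s31. A scalar model of that inner loop (assuming 4-wide packed A and B panels; min/max clamping, remainder handling, and pointer bumping are omitted):

```c
#include <stddef.h>

/* Scalar sketch of a 4x4 f32 GEMM micro-kernel: for each k, one
 * B element (the s12 role) multiplies four A elements, feeding
 * sixteen accumulators (the s16..s31 role). */
static void gemm_4x4(size_t kc, const float *a, const float *b,
                     float c[4][4]) {
  float acc[4][4] = {{0.0f}};
  for (size_t k = 0; k < kc; k++) {
    for (int n = 0; n < 4; n++) {
      for (int m = 0; m < 4; m++) {
        acc[m][n] += a[k * 4 + m] * b[k * 4 + n]; /* VMLA.F32 analog */
      }
    }
  }
  for (int m = 0; m < 4; m++)
    for (int n = 0; n < 4; n++) c[m][n] = acc[m][n];
}
```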
/external/llvm/test/MC/Disassembler/ARM/
fullfp16-arm.txt:69 # CHECK: vcvt.u32.f16 s12, s12, #20
118 # CHECK: vmaxnm.f16 s5, s12, s0
121 # CHECK: vminnm.f16 s0, s0, s12
133 # CHECK: vrinta.f16 s12, s1
136 # CHECK: vrintn.f16 s12, s1
139 # CHECK: vrintp.f16 s12, s1
142 # CHECK: vrintm.f16 s12, s1
