
Searched refs:s12 (Results 1 – 25 of 152) sorted by relevance


/external/capstone/suite/MC/AArch64/
neon-scalar-cvt.s.cs
10 0x95,0xfd,0x3f,0x5f = fcvtzs s21, s12, #1
12 0x95,0xfd,0x3f,0x7f = fcvtzu s21, s12, #1
15 0xac,0xc9,0x21,0x5e = fcvtas s12, s13
17 0xac,0xc9,0x21,0x7e = fcvtau s12, s13
21 0xac,0xb9,0x21,0x7e = fcvtmu s12, s13
25 0xac,0xa9,0x21,0x7e = fcvtnu s12, s13
29 0xac,0xa9,0xa1,0x7e = fcvtpu s12, s13
31 0xac,0xb9,0xa1,0x5e = fcvtzs s12, s13
33 0xac,0xb9,0xa1,0x7e = fcvtzu s12, s13
neon-scalar-fp-compare.s.cs
2 0x6a,0xe5,0x2c,0x5e = fcmeq s10, s11, s12
6 0x6a,0xe5,0x2c,0x7e = fcmge s10, s11, s12
10 0x6a,0xe5,0xac,0x7e = fcmgt s10, s11, s12
18 0x6a,0xed,0x2c,0x7e = facge s10, s11, s12
20 0x6a,0xed,0xac,0x7e = facgt s10, s11, s12
neon-scalar-mul.s.cs
9 0x13,0x93,0xac,0x5e = sqdmlal d19, s24, s12
12 0xcc,0xd2,0x6c,0x5e = sqdmull s12, h22, h12
13 0xcf,0xd2,0xac,0x5e = sqdmull d15, s22, s12
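
Each line in these Capstone .cs regression files pairs a little-endian instruction encoding with the disassembly it must produce. A minimal sketch of replaying one such pair through Capstone's Python binding (assuming the capstone package is installed):

    from capstone import Cs, CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN

    # Encoding from neon-scalar-cvt.s.cs line 10, expected to decode as
    # "fcvtzs s21, s12, #1".
    code = bytes([0x95, 0xfd, 0x3f, 0x5f])

    md = Cs(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN)
    for insn in md.disasm(code, 0):
        print(insn.mnemonic, insn.op_str)  # fcvtzs s21, s12, #1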
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/ARM/
vpush-vpop.s
7 vpush {s8, s9, s10, s11, s12}
9 vpop {s8, s9, s10, s11, s12}
12 vpush.16 {s8, s9, s10, s11, s12}
14 vpop.64 {s8, s9, s10, s11, s12}
17 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
19 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
22 @ CHECK-ARM: vpush {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0x2d,0xed]
24 @ CHECK-ARM: vpop {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0xbd,0xec]
27 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
29 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
[all …]
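
Note the byte order in the paired checks above: these vpush/vpop instructions have the same 32-bit pattern in both instruction sets, but ARM mode serializes it as one little-endian 32-bit word while Thumb2 serializes two little-endian 16-bit halfwords. A small illustrative check in Python:

    def swap_halfwords(enc: bytes) -> bytes:
        # ARM mode: one little-endian 32-bit word; Thumb2: two little-endian
        # 16-bit halfwords. For instructions whose 32-bit pattern is shared,
        # swapping the halfwords converts one serialization into the other.
        return enc[2:4] + enc[:2]

    arm   = bytes([0x05, 0x4a, 0x2d, 0xed])  # CHECK-ARM vpush encoding
    thumb = bytes([0x2d, 0xed, 0x05, 0x4a])  # CHECK-THUMB vpush encoding
    assert swap_halfwords(arm) == thumb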
fullfp16.s
92 vcvt.u32.f16 s12, s12, #20
100 @ ARM: vcvt.u32.f16 s12, s12, #20 @ encoding: [0xc6,0x69,0xbf,0xee]
108 @ THUMB: vcvt.u32.f16 s12, s12, #20 @ encoding: [0xbf,0xee,0xc6,0x69]
161 vmaxnm.f16 s5, s12, s0
162 @ ARM: vmaxnm.f16 s5, s12, s0 @ encoding: [0x00,0x29,0xc6,0xfe]
163 @ THUMB: vmaxnm.f16 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x29]
165 vminnm.f16 s0, s0, s12
166 @ ARM: vminnm.f16 s0, s0, s12 @ encoding: [0x46,0x09,0x80,0xfe]
167 @ THUMB: vminnm.f16 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x09]
181 vrinta.f16 s12, s1
[all …]
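
The #20 operand in the vcvt.u32.f16 lines above selects a fixed-point conversion with 20 fraction bits. A rough scalar model of that operation (hypothetical helper, not a transcription of the ARM pseudocode; VCVT to fixed point rounds toward zero by default, and NaN handling is omitted):

    def vcvt_u32_fxp(x: float, fbits: int = 20) -> int:
        # Scale by 2^fbits, truncate toward zero, saturate to u32.
        v = int(x * (1 << fbits))  # int() truncates toward zero
        return max(0, min(v, 2**32 - 1))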
thumb-fp-armv8.s
86 vmaxnm.f32 s5, s12, s0
87 @ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x2a]
90 vminnm.f32 s0, s0, s12
91 @ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x0a]
117 vrinta.f32 s12, s1
118 @ CHECK: vrinta.f32 s12, s1 @ encoding: [0xb8,0xfe,0x60,0x6a]
121 vrintn.f32 s12, s1
122 @ CHECK: vrintn.f32 s12, s1 @ encoding: [0xb9,0xfe,0x60,0x6a]
125 vrintp.f32 s12, s1
126 @ CHECK: vrintp.f32 s12, s1 @ encoding: [0xba,0xfe,0x60,0x6a]
[all …]
fp-armv8.s
83 vmaxnm.f32 s5, s12, s0
84 @ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0x00,0x2a,0xc6,0xfe]
87 vminnm.f32 s0, s0, s12
88 @ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x46,0x0a,0x80,0xfe]
111 vrinta.f32 s12, s1
112 @ CHECK: vrinta.f32 s12, s1 @ encoding: [0x60,0x6a,0xb8,0xfe]
115 vrintn.f32 s12, s1
116 @ CHECK: vrintn.f32 s12, s1 @ encoding: [0x60,0x6a,0xb9,0xfe]
119 vrintp.f32 s12, s1
120 @ CHECK: vrintp.f32 s12, s1 @ encoding: [0x60,0x6a,0xba,0xfe]
[all …]
/external/llvm/test/MC/ARM/
vpush-vpop.s
7 vpush {s8, s9, s10, s11, s12}
9 vpop {s8, s9, s10, s11, s12}
12 vpush.16 {s8, s9, s10, s11, s12}
14 vpop.64 {s8, s9, s10, s11, s12}
17 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
19 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
22 @ CHECK-ARM: vpush {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0x2d,0xed]
24 @ CHECK-ARM: vpop {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0xbd,0xec]
27 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
29 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
[all …]
fullfp16.s
92 vcvt.u32.f16 s12, s12, #20
100 @ ARM: vcvt.u32.f16 s12, s12, #20 @ encoding: [0xc6,0x69,0xbf,0xee]
108 @ THUMB: vcvt.u32.f16 s12, s12, #20 @ encoding: [0xbf,0xee,0xc6,0x69]
161 vmaxnm.f16 s5, s12, s0
162 @ ARM: vmaxnm.f16 s5, s12, s0 @ encoding: [0x00,0x29,0xc6,0xfe]
163 @ THUMB: vmaxnm.f16 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x29]
165 vminnm.f16 s0, s0, s12
166 @ ARM: vminnm.f16 s0, s0, s12 @ encoding: [0x46,0x09,0x80,0xfe]
167 @ THUMB: vminnm.f16 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x09]
181 vrinta.f16 s12, s1
[all …]
fp-armv8.s
83 vmaxnm.f32 s5, s12, s0
84 @ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0x00,0x2a,0xc6,0xfe]
87 vminnm.f32 s0, s0, s12
88 @ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x46,0x0a,0x80,0xfe]
111 vrinta.f32 s12, s1
112 @ CHECK: vrinta.f32 s12, s1 @ encoding: [0x60,0x6a,0xb8,0xfe]
115 vrintn.f32 s12, s1
116 @ CHECK: vrintn.f32 s12, s1 @ encoding: [0x60,0x6a,0xb9,0xfe]
119 vrintp.f32 s12, s1
120 @ CHECK: vrintp.f32 s12, s1 @ encoding: [0x60,0x6a,0xba,0xfe]
[all …]
thumb-fp-armv8.s
86 vmaxnm.f32 s5, s12, s0
87 @ CHECK: vmaxnm.f32 s5, s12, s0 @ encoding: [0xc6,0xfe,0x00,0x2a]
90 vminnm.f32 s0, s0, s12
91 @ CHECK: vminnm.f32 s0, s0, s12 @ encoding: [0x80,0xfe,0x46,0x0a]
117 vrinta.f32 s12, s1
118 @ CHECK: vrinta.f32 s12, s1 @ encoding: [0xb8,0xfe,0x60,0x6a]
121 vrintn.f32 s12, s1
122 @ CHECK: vrintn.f32 s12, s1 @ encoding: [0xb9,0xfe,0x60,0x6a]
125 vrintp.f32 s12, s1
126 @ CHECK: vrintp.f32 s12, s1 @ encoding: [0xba,0xfe,0x60,0x6a]
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/
neon-scalar-cvt.s
58 fcvtzs s21, s12, #1
70 fcvtzu s21, s12, #1
92 fcvtas s12, s13
105 fcvtau s12, s13
131 fcvtmu s12, s13
157 fcvtnu s12, s13
183 fcvtpu s12, s13
195 fcvtzs s12, s13
208 fcvtzu s12, s13
neon-scalar-fp-compare.s
10 fcmeq s10, s11, s12
40 fcmge s10, s11, s12
70 fcmgt s10, s11, s12
136 facge s10, s11, s12
148 facgt s10, s11, s12
neon-scalar-mul.s
42 sqdmlal d19, s24, s12
61 sqdmull s12, h22, h12
62 sqdmull d15, s22, s12
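
sqdmull in the lines above is a signed saturating doubling multiply long: the product of two N-bit operands (h registers → s, or s registers → d) is doubled and written to a 2N-bit register, saturating on overflow; sqdmlal additionally accumulates into the destination with a second saturation. A scalar sketch with a hypothetical name:

    def sqdmull(a: int, b: int, src_bits: int = 32) -> int:
        # Double the widened product and saturate to 2*src_bits. The only
        # case that can overflow is both operands being -2^(src_bits-1).
        out_max = 2**(2 * src_bits - 1) - 1
        out_min = -(2**(2 * src_bits - 1))
        return max(out_min, min(2 * a * b, out_max))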
/external/llvm/test/MC/AArch64/
neon-scalar-cvt.s
58 fcvtzs s21, s12, #1
70 fcvtzu s21, s12, #1
92 fcvtas s12, s13
105 fcvtau s12, s13
131 fcvtmu s12, s13
157 fcvtnu s12, s13
183 fcvtpu s12, s13
195 fcvtzs s12, s13
208 fcvtzu s12, s13
neon-scalar-fp-compare.s
10 fcmeq s10, s11, s12
40 fcmge s10, s11, s12
70 fcmgt s10, s11, s12
136 facge s10, s11, s12
148 facgt s10, s11, s12
neon-scalar-mul.s
42 sqdmlal d19, s24, s12
61 sqdmull s12, h22, h12
62 sqdmull d15, s22, s12
/external/aac/libArithCoding/src/
ac_arith_coder.cpp
578 ULONG s12 = (fMax((UINT)s, (UINT)1) << 12) - 1; in get_pk_v2() local
579 if (s12 > p[485]) { in get_pk_v2()
582 if (s12 > p[255]) p += 256; in get_pk_v2()
585 if (s12 > p[127]) { in get_pk_v2()
588 if (s12 > p[63]) { in get_pk_v2()
591 if (s12 > p[31]) { in get_pk_v2()
594 if (s12 > p[15]) { in get_pk_v2()
597 if (s12 > p[7]) { in get_pk_v2()
600 if (s12 > p[3]) { in get_pk_v2()
603 if (s12 > p[1]) { in get_pk_v2()
[all …]
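
The get_pk_v2() excerpt scales s into a 12-bit key and then runs a hand-unrolled binary search over the boundary table p: after the initial split at p[485], the probes at offsets 255, 127, 63, 31, 15, 7, 3, 1 halve the interval each step. A loop form of the same lookup, assuming p is sorted ascending (hypothetical names):

    def get_pk_sketch(s: int, p: list) -> int:
        # Scale s into a 12-bit key, then find the first boundary in the
        # sorted table p that the key does not exceed.
        s12 = (max(s, 1) << 12) - 1
        lo, hi = 0, len(p)
        while lo < hi:
            mid = (lo + hi) // 2
            if s12 > p[mid]:
                lo = mid + 1
            else:
                hi = mid
        return lo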
/external/boringssl/src/third_party/fiat/
curve25519.c
1078 int64_t s12 = 2097151 & (load_4(s + 31) >> 4); in x25519_sc_reduce() local
1109 s12 += s23 * 470296; in x25519_sc_reduce()
1118 s12 += s22 * 654183; in x25519_sc_reduce()
1127 s12 -= s21 * 997805; in x25519_sc_reduce()
1136 s12 += s20 * 136657; in x25519_sc_reduce()
1145 s12 -= s19 * 683901; in x25519_sc_reduce()
1165 carry12 = (s12 + (1 << 20)) >> 21; in x25519_sc_reduce()
1167 s12 -= int64_lshift21(carry12); in x25519_sc_reduce()
1182 s12 += carry11; in x25519_sc_reduce()
1231 s0 += s12 * 666643; in x25519_sc_reduce()
[all …]
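
In x25519_sc_reduce(), s12 is one of 24 signed 21-bit limbs of a 512-bit scalar being reduced modulo the group order ℓ = 2^252 + 27742317777372353535851937790883648493. Each high limb is folded into the six limbs starting twelve positions below it using the constants visible above (666643, 470296, 654183, 997805, 136657, 683901), which are the signed limb decomposition of 2^252 mod ℓ; the carry lines then renormalize limbs back to 21 bits. A quick check of that identity, plus the carry step, in Python:

    ELL = 2**252 + 27742317777372353535851937790883648493

    # Signed limb decomposition used by x25519_sc_reduce():
    # 2^252 = 666643 + 470296*2^21 + 654183*2^42
    #         - 997805*2^63 + 136657*2^84 - 683901*2^105  (mod ELL)
    limbs = [666643, 470296, 654183, -997805, 136657, -683901]
    assert sum(c << (21 * i) for i, c in enumerate(limbs)) % ELL == 2**252

    def carry21(s: int):
        # Carry step from the excerpt: round-to-nearest renormalization
        # of one limb to the signed 21-bit range, returning (limb, carry).
        c = (s + (1 << 20)) >> 21
        return s - (c << 21), c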
/external/capstone/suite/MC/ARM/
vpush-vpop.s.cs
3 0x05,0x4a,0x2d,0xed = vpush {s8, s9, s10, s11, s12}
5 0x05,0x4a,0xbd,0xec = vpop {s8, s9, s10, s11, s12}
7 0x05,0x4a,0x2d,0xed = vpush {s8, s9, s10, s11, s12}
9 0x05,0x4a,0xbd,0xec = vpop {s8, s9, s10, s11, s12}
vpush-vpop-thumb.s.cs
3 0x2d,0xed,0x05,0x4a = vpush {s8, s9, s10, s11, s12}
5 0xbd,0xec,0x05,0x4a = vpop {s8, s9, s10, s11, s12}
7 0x2d,0xed,0x05,0x4a = vpush {s8, s9, s10, s11, s12}
9 0xbd,0xec,0x05,0x4a = vpop {s8, s9, s10, s11, s12}
thumb-fp-armv8.s.cs
34 0xc6,0xfe,0x00,0x2a = vmaxnm.f32 s5, s12, s0
36 0x80,0xfe,0x46,0x0a = vminnm.f32 s0, s0, s12
45 0xb8,0xfe,0x60,0x6a = vrinta.f32 s12, s1
47 0xb9,0xfe,0x60,0x6a = vrintn.f32 s12, s1
49 0xba,0xfe,0x60,0x6a = vrintp.f32 s12, s1
51 0xbb,0xfe,0x60,0x6a = vrintm.f32 s12, s1
fp-armv8.s.cs
34 0x00,0x2a,0xc6,0xfe = vmaxnm.f32 s5, s12, s0
36 0x46,0x0a,0x80,0xfe = vminnm.f32 s0, s0, s12
45 0x60,0x6a,0xb8,0xfe = vrinta.f32 s12, s1
47 0x60,0x6a,0xb9,0xfe = vrintn.f32 s12, s1
49 0x60,0x6a,0xba,0xfe = vrintp.f32 s12, s1
51 0x60,0x6a,0xbb,0xfe = vrintm.f32 s12, s1
/external/libvpx/libvpx/vp9/common/x86/
vp9_highbd_iht16x16_add_sse4.c
58 s10[2], s11[2], s12[2], s13[2], s14[2], s15[2]; in highbd_iadst16_4col_sse4_1() local
70 highbd_iadst_butterfly_sse4_1(io[3], io[12], cospi_25_64, cospi_7_64, s12, in highbd_iadst16_4col_sse4_1()
83 x4[0] = _mm_add_epi64(s4[0], s12[0]); in highbd_iadst16_4col_sse4_1()
84 x4[1] = _mm_add_epi64(s4[1], s12[1]); in highbd_iadst16_4col_sse4_1()
99 x12[0] = _mm_sub_epi64(s4[0], s12[0]); in highbd_iadst16_4col_sse4_1()
100 x12[1] = _mm_sub_epi64(s4[1], s12[1]); in highbd_iadst16_4col_sse4_1()
179 s12); in highbd_iadst16_4col_sse4_1()
183 x8[0] = _mm_add_epi64(s8[0], s12[0]); in highbd_iadst16_4col_sse4_1()
184 x8[1] = _mm_add_epi64(s8[1], s12[1]); in highbd_iadst16_4col_sse4_1()
191 x12[0] = _mm_sub_epi64(s8[0], s12[0]); in highbd_iadst16_4col_sse4_1()
[all …]
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_iht16x16_add_neon.c
40 int32x4_t s8[2], s9[2], s10[2], s11[2], s12[2], s13[2], s14[2], s15[2]; in vpx_iadst16x16_256_add_half1d() local
141 iadst_butterfly_lane_0_1_neon(x[12], x[13], c_25_7_29_3, s12, s13); in vpx_iadst16x16_256_add_half1d()
148 x[4] = add_dct_const_round_shift_low_8(s4, s12); in vpx_iadst16x16_256_add_half1d()
156 x[12] = sub_dct_const_round_shift_low_8(s4, s12); in vpx_iadst16x16_256_add_half1d()
172 iadst_butterfly_lane_1_0_neon(x[13], x[12], c_4_28_20_12, s13, s12); in vpx_iadst16x16_256_add_half1d()
183 x[8] = add_dct_const_round_shift_low_8(s8, s12); in vpx_iadst16x16_256_add_half1d()
187 x[12] = sub_dct_const_round_shift_low_8(s8, s12); in vpx_iadst16x16_256_add_half1d()
203 iadst_butterfly_lane_2_3_neon(x[12], x[13], c_16_n16_8_24, s12, s13); in vpx_iadst16x16_256_add_half1d()
218 x[12] = add_dct_const_round_shift_low_8(s12, s14); in vpx_iadst16x16_256_add_half1d()
220 x[14] = sub_dct_const_round_shift_low_8(s12, s14); in vpx_iadst16x16_256_add_half1d()
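
In both the SSE4 and NEON iadst16 excerpts, s12 holds the paired 64-bit lanes of one butterfly output. Each butterfly combines two inputs with a pair of cosine constants, and stage outputs are merged with an add or subtract followed by libvpx's 14-bit round-and-shift. A scalar sketch of the pattern (helper names hypothetical; DCT_CONST_BITS is libvpx's actual constant):

    DCT_CONST_BITS = 14  # fixed-point precision of the cospi_*_64 constants

    def dct_const_round_shift(x: int) -> int:
        # Round to nearest and drop the 14 fraction bits.
        return (x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS

    def iadst_butterfly(x0: int, x1: int, c0: int, c1: int):
        # One butterfly: two dot products with a swapped/negated pair.
        return x0 * c0 + x1 * c1, x0 * c1 - x1 * c0

    # Stage merge as in the excerpts, conceptually:
    #   x[4]  = dct_const_round_shift(s4 + s12)
    #   x[12] = dct_const_round_shift(s4 - s12)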
