Searched refs:sqsub (Results 1 – 25 of 70) sorted by relevance

/external/llvm-project/llvm/test/MC/AArch64/SVE/
sqsub.s
11 sqsub z0.b, z0.b, z0.b
17 sqsub z0.h, z0.h, z0.h
23 sqsub z0.s, z0.s, z0.s
29 sqsub z0.d, z0.d, z0.d
35 sqsub z0.b, z0.b, #0
41 sqsub z31.b, z31.b, #255
47 sqsub z0.h, z0.h, #0
53 sqsub z0.h, z0.h, #0, lsl #8
59 sqsub z31.h, z31.h, #255, lsl #8
65 sqsub z31.h, z31.h, #65280
[all …]
sqsub-diagnostics.s
4 sqsub z22.h, z10.h, z32.h
10 sqsub z20.h, z2.h, z31.x
16 sqsub z27.h, z11.h, z27.b
25 sqsub z0.b, z0.b, #0, lsl #8 // #0, lsl #8 is not valid for .b
30 sqsub z0.b, z0.b, #-1
35 sqsub z0.b, z0.b, #1, lsl #8
40 sqsub z0.b, z0.b, #256
45 sqsub z0.h, z0.h, #-1
50 sqsub z0.h, z0.h, #256, lsl #8
55 sqsub z0.h, z0.h, #65536
[all …]
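
For reference, a minimal sketch (my annotations, not lines taken from the files above) of the unpredicated SVE forms these MC tests assemble and the constraints the diagnostics test rejects:

    // Unpredicated vector form: per-element signed saturating subtract,
    // element size .b/.h/.s/.d.
    sqsub   z0.b, z1.b, z2.b
    sqsub   z0.d, z1.d, z2.d
    // Immediate form: destructive (the destination doubles as the first source),
    // immediate 0-255 with an optional "lsl #8" for .h/.s/.d; the shift is
    // rejected for .b, and out-of-range values such as #-1 or #256 are errors.
    sqsub   z0.h, z0.h, #255
    sqsub   z31.h, z31.h, #255, lsl #8   // 255 << 8 = 65280, the value the test above also writes directly
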
/external/llvm-project/llvm/test/MC/AArch64/SVE2/
sqsub.s
10 sqsub z0.b, p0/m, z0.b, z1.b
16 sqsub z0.h, p0/m, z0.h, z1.h
22 sqsub z29.s, p7/m, z29.s, z30.s
28 sqsub z31.d, p7/m, z31.d, z30.d
43 sqsub z31.d, p0/m, z31.d, z30.d
55 sqsub z31.d, p7/m, z31.d, z30.d
sqsub-diagnostics.s
6 sqsub z0.b, p0/m, z1.b, z2.b
15 sqsub z0.b, p0/m, z0.d, z1.d
20 sqsub z0.b, p0/m, z0.b, z1.h
29 sqsub z0.b, p0/z, z0.b, z1.b
34 sqsub z0.b, p8/m, z0.b, z1.b
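
The SVE2 variant adds a predicated, destructive form. A brief sketch of what the tests above accept and reject (descriptive comments are mine, not lines from the tests):

    // Active lanes of Zdn become saturating(Zdn - Zm); inactive lanes merge
    // (keep their old value) under the governing predicate.
    sqsub   z0.b, p0/m, z0.b, z1.b
    // Rejected by the diagnostics test:
    //   - destination not tied to the first source register
    //   - mismatched element sizes between Zdn and Zm
    //   - zeroing predication (p0/z); only merging (/m) is available
    //   - governing predicates above p7
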
/external/llvm-project/llvm/test/CodeGen/AArch64/
arm64-vqsub.ll
5 ;CHECK: sqsub.8b
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
14 ;CHECK: sqsub.4h
17 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
23 ;CHECK: sqsub.2s
26 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
59 ;CHECK: sqsub.16b
62 %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
68 ;CHECK: sqsub.8h
71 %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
[all …]
ssub_sat_vec.ll
39 ; CHECK-NEXT: sqsub v0.16b, v0.16b, v1.16b
48 ; CHECK-NEXT: sqsub v0.16b, v0.16b, v2.16b
49 ; CHECK-NEXT: sqsub v1.16b, v1.16b, v3.16b
58 ; CHECK-NEXT: sqsub v0.16b, v0.16b, v4.16b
59 ; CHECK-NEXT: sqsub v1.16b, v1.16b, v5.16b
60 ; CHECK-NEXT: sqsub v2.16b, v2.16b, v6.16b
61 ; CHECK-NEXT: sqsub v3.16b, v3.16b, v7.16b
70 ; CHECK-NEXT: sqsub v0.8h, v0.8h, v1.8h
79 ; CHECK-NEXT: sqsub v0.8h, v0.8h, v2.8h
80 ; CHECK-NEXT: sqsub v1.8h, v1.8h, v3.8h
[all …]
arm64-neon-v8.1a.ll
22 declare <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16>, <4 x i16>)
23 declare <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16>, <8 x i16>)
24 declare <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32>, <2 x i32>)
25 declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
26 declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32)
27 declare i16 @llvm.aarch64.neon.sqsub.i16(i16, i16)
76 %retval = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %acc, <4 x i16> %prod)
86 %retval = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %acc, <8 x i16> %prod)
96 %retval = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %acc, <2 x i32> %prod)
106 %retval = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %acc, <4 x i32> %prod)
[all …]
arm64-arith-saturating.ll
46 ; CHECK: sqsub s0, s0, s1
49 %vqsub.i = tail call i32 @llvm.aarch64.neon.sqsub.i32(i32 %vecext, i32 %vecext1) nounwind
55 ; CHECK: sqsub d0, d0, d1
58 %vqsub.i = tail call i64 @llvm.aarch64.neon.sqsub.i64(i64 %vecext, i64 %vecext1) nounwind
82 declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64) nounwind readnone
83 declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32) nounwind readnone
sve-intrinsics-int-arith.ll
183 ; CHECK: sqsub z0.b, z0.b, z1.b
185 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
192 ; CHECK: sqsub z0.h, z0.h, z1.h
194 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
201 ; CHECK: sqsub z0.s, z0.s, z1.s
203 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
210 ; CHECK: sqsub z0.d, z0.d, z1.d
212 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
382 declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
383 declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
[all …]
arm64-vecFold.ll
125 …%vqsub2.i = tail call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %a0, <2 x i32> %a1) nounw…
127 ; CHECK: sqsub.2s v0, v0, v1
136 declare <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
sve-int-arith.ll
120 ; CHECK-NEXT: sqsub z0.d, z0.d, z1.d
129 ; CHECK-NEXT: sqsub z0.s, z0.s, z1.s
138 ; CHECK-NEXT: sqsub z0.h, z0.h, z1.h
147 ; CHECK-NEXT: sqsub z0.b, z0.b, z1.b
sve-intrinsics-int-arith-imm.ll
536 ; CHECK-NEXT: sqsub z0.b, z0.b, #27 // =0x1b
540 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
548 ; CHECK-NEXT: sqsub z0.h, z0.h, #43 // =0x2b
552 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
560 ; CHECK-NEXT: sqsub z0.h, z0.h, #2048 // =0x800
564 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
572 ; CHECK-NEXT: sqsub z0.s, z0.s, #1 // =0x1
576 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
584 ; CHECK-NEXT: sqsub z0.s, z0.s, #8192 // =0x2000
588 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
[all …]
sve-int-imm.ll
369 ; CHECK: sqsub z0.b, z0.b, #30
379 ; CHECK: sqsub z0.h, z0.h, #30
389 ; CHECK: sqsub z0.h, z0.h, #1024
399 ; CHECK: sqsub z0.s, z0.s, #30
409 ; CHECK: sqsub z0.s, z0.s, #1024
419 ; CHECK: sqsub z0.d, z0.d, #30
429 ; CHECK: sqsub z0.d, z0.d, #1024
ssub_sat.ll
91 ; CHECK-NEXT: sqsub v0.4s, v0.4s, v1.4s
arm64-neon-2velem-high.ll
436 …%vqdmlsl17.i.i = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.…
448 …%vqdmlsl17.i.i = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.…
462 …%vqdmlsl11.i.i = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i…
474 …%vqdmlsl11.i.i = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i…
571 declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
572 declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
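
The CodeGen hits above all pin the same selection: saturating-subtract operations at the IR level come out as a single sqsub. A rough summary of intrinsic-to-instruction pairings assembled from the CHECK lines listed (my sketch, not a line from any one test):

    sqsub   v0.8b, v0.8b, v1.8b      // from @llvm.aarch64.neon.sqsub.v8i8 (NEON vector form)
    sqsub   s0, s0, s1               // from @llvm.aarch64.neon.sqsub.i32 (NEON scalar form)
    sqsub   z0.b, z0.b, z1.b         // from @llvm.aarch64.sve.sqsub.x.nxv16i8 (SVE unpredicated form)
    sqsub   z0.h, z0.h, #43          // SVE immediate form, selected when the second operand is a
                                     // constant splat (sve-intrinsics-int-arith-imm.ll, sve-int-imm.ll)
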
/external/llvm/test/CodeGen/AArch64/
arm64-vqsub.ll
5 ;CHECK: sqsub.8b
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
14 ;CHECK: sqsub.4h
17 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
23 ;CHECK: sqsub.2s
26 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
59 ;CHECK: sqsub.16b
62 %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
68 ;CHECK: sqsub.8h
71 %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
[all …]
arm64-neon-v8.1a.ll
19 declare <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16>, <4 x i16>)
20 declare <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16>, <8 x i16>)
21 declare <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32>, <2 x i32>)
22 declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
23 declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32)
24 declare i16 @llvm.aarch64.neon.sqsub.i16(i16, i16)
73 %retval = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %acc, <4 x i16> %prod)
83 %retval = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %acc, <8 x i16> %prod)
93 %retval = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %acc, <2 x i32> %prod)
103 %retval = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %acc, <4 x i32> %prod)
[all …]
arm64-arith-saturating.ll
46 ; CHECK: sqsub s0, s0, s1
49 %vqsub.i = tail call i32 @llvm.aarch64.neon.sqsub.i32(i32 %vecext, i32 %vecext1) nounwind
55 ; CHECK: sqsub d0, d0, d1
58 %vqsub.i = tail call i64 @llvm.aarch64.neon.sqsub.i64(i64 %vecext, i64 %vecext1) nounwind
82 declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64) nounwind readnone
83 declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32) nounwind readnone
arm64-vecFold.ll
125 …%vqsub2.i = tail call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %a0, <2 x i32> %a1) nounw…
127 ; CHECK: sqsub.2s v0, v0, v1
136 declare <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
/external/llvm/test/MC/AArch64/
neon-saturating-add-sub.s
47 sqsub v0.8b, v1.8b, v2.8b
48 sqsub v0.16b, v1.16b, v2.16b
49 sqsub v0.4h, v1.4h, v2.4h
50 sqsub v0.8h, v1.8h, v2.8h
51 sqsub v0.2s, v1.2s, v2.2s
52 sqsub v0.4s, v1.4s, v2.4s
53 sqsub v0.2d, v1.2d, v2.2d
neon-scalar-saturating-add-sub.s
32 sqsub b0, b1, b2
33 sqsub h10, h11, h12
34 sqsub s20, s21, s2
35 sqsub d17, d31, d8
/external/capstone/suite/MC/AArch64/
neon-saturating-add-sub.s.cs
16 0x20,0x2c,0x22,0x0e = sqsub v0.8b, v1.8b, v2.8b
17 0x20,0x2c,0x22,0x4e = sqsub v0.16b, v1.16b, v2.16b
18 0x20,0x2c,0x62,0x0e = sqsub v0.4h, v1.4h, v2.4h
19 0x20,0x2c,0x62,0x4e = sqsub v0.8h, v1.8h, v2.8h
20 0x20,0x2c,0xa2,0x0e = sqsub v0.2s, v1.2s, v2.2s
21 0x20,0x2c,0xa2,0x4e = sqsub v0.4s, v1.4s, v2.4s
22 0x20,0x2c,0xe2,0x4e = sqsub v0.2d, v1.2d, v2.2d
neon-scalar-saturating-add-sub.s.cs
10 0x20,0x2c,0x22,0x5e = sqsub b0, b1, b2
11 0x6a,0x2d,0x6c,0x5e = sqsub h10, h11, h12
12 0xb4,0x2e,0xa2,0x5e = sqsub s20, s21, s2
13 0xf1,0x2f,0xe8,0x5e = sqsub d17, d31, d8
/external/llvm-project/llvm/test/MC/AArch64/
neon-saturating-add-sub.s
47 sqsub v0.8b, v1.8b, v2.8b
48 sqsub v0.16b, v1.16b, v2.16b
49 sqsub v0.4h, v1.4h, v2.4h
50 sqsub v0.8h, v1.8h, v2.8h
51 sqsub v0.2s, v1.2s, v2.2s
52 sqsub v0.4s, v1.4s, v2.4s
53 sqsub v0.2d, v1.2d, v2.2d
neon-scalar-saturating-add-sub.s
32 sqsub b0, b1, b2
33 sqsub h10, h11, h12
34 sqsub s20, s21, s2
35 sqsub d17, d31, d8
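
The base Advanced SIMD (NEON) forms covered by the assembler tests and the capstone disassembler fixtures above, summarized as a short sketch (the descriptive comments are mine):

    // Vector form: per-lane signed saturating subtract over 64-bit
    // (.8b/.4h/.2s) and 128-bit (.16b/.8h/.4s/.2d) arrangements.
    sqsub   v0.8b, v1.8b, v2.8b
    sqsub   v0.2d, v1.2d, v2.2d
    // Scalar form: a single b/h/s/d element, with the result saturated
    // to the element's signed range.
    sqsub   b0, b1, b2
    sqsub   d17, d31, d8
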
