; (HTML code-viewer navigation chrome removed — extraction artifact, not part of the test file)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s

; Declarations of the llvm.uadd.sat saturating unsigned-add intrinsic for
; every vector element type / element count exercised by the tests below.
declare <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8>, <1 x i8>)
declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>)
declare <12 x i8> @llvm.uadd.sat.v12i8(<12 x i8>, <12 x i8>)
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8>, <64 x i8>)

declare <1 x i16> @llvm.uadd.sat.v1i16(<1 x i16>, <1 x i16>)
declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <12 x i16> @llvm.uadd.sat.v12i16(<12 x i16>, <12 x i16>)
declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16>, <32 x i16>)

declare <16 x i1> @llvm.uadd.sat.v16i1(<16 x i1>, <16 x i1>)
declare <16 x i4> @llvm.uadd.sat.v16i4(<16 x i4>, <16 x i4>)

declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>)
declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>)

declare <4 x i24> @llvm.uadd.sat.v4i24(<4 x i24>, <4 x i24>)
declare <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128>, <2 x i128>)

; One full 128-bit NEON register: lowers to a single uqadd.
define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-LABEL: v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %z = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
  ret <16 x i8> %z
}

; Two 128-bit registers: splits into two uqadd instructions.
define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
; CHECK-LABEL: v32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    uqadd v1.16b, v1.16b, v3.16b
; CHECK-NEXT:    ret
  %z = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %x, <32 x i8> %y)
  ret <32 x i8> %z
}

; Four 128-bit registers: splits into four uqadd instructions.
define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
; CHECK-LABEL: v64i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.16b, v0.16b, v4.16b
; CHECK-NEXT:    uqadd v1.16b, v1.16b, v5.16b
; CHECK-NEXT:    uqadd v2.16b, v2.16b, v6.16b
; CHECK-NEXT:    uqadd v3.16b, v3.16b, v7.16b
; CHECK-NEXT:    ret
  %z = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %x, <64 x i8> %y)
  ret <64 x i8> %z
}

; One full 128-bit register of i16 lanes: single uqadd.
define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; CHECK-LABEL: v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.8h, v0.8h, v1.8h
; CHECK-NEXT:    ret
  %z = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
  ret <8 x i16> %z
}

; Two 128-bit registers of i16 lanes: two uqadd instructions.
define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
; CHECK-LABEL: v16i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.8h, v0.8h, v2.8h
; CHECK-NEXT:    uqadd v1.8h, v1.8h, v3.8h
; CHECK-NEXT:    ret
  %z = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %x, <16 x i16> %y)
  ret <16 x i16> %z
}

; Four 128-bit registers of i16 lanes: four uqadd instructions.
define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
; CHECK-LABEL: v32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.8h, v0.8h, v4.8h
; CHECK-NEXT:    uqadd v1.8h, v1.8h, v5.8h
; CHECK-NEXT:    uqadd v2.8h, v2.8h, v6.8h
; CHECK-NEXT:    uqadd v3.8h, v3.8h, v7.8h
; CHECK-NEXT:    ret
  %z = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %x, <32 x i16> %y)
  ret <32 x i16> %z
}

; 64-bit subregister case, via memory: d-register load/uqadd/store.
define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
; CHECK-LABEL: v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    ldr d1, [x1]
; CHECK-NEXT:    uqadd v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    str d0, [x2]
; CHECK-NEXT:    ret
  %x = load <8 x i8>, <8 x i8>* %px
  %y = load <8 x i8>, <8 x i8>* %py
  %z = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
  store <8 x i8> %z, <8 x i8>* %pz
  ret void
}

; Sub-byte-count vector: elements are promoted to i16 lanes, shifted to the
; top of the lane so uqadd saturates at the i8 boundary, then shifted back.
define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
; CHECK-LABEL: v4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldrb w8, [x0]
; CHECK-NEXT:    ldrb w9, [x1]
; CHECK-NEXT:    ldrb w10, [x0, #1]
; CHECK-NEXT:    ldrb w11, [x1, #1]
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    fmov s1, w9
; CHECK-NEXT:    ldrb w8, [x0, #2]
; CHECK-NEXT:    ldrb w9, [x1, #2]
; CHECK-NEXT:    mov v0.h[1], w10
; CHECK-NEXT:    mov v1.h[1], w11
; CHECK-NEXT:    ldrb w10, [x0, #3]
; CHECK-NEXT:    ldrb w11, [x1, #3]
; CHECK-NEXT:    mov v0.h[2], w8
; CHECK-NEXT:    mov v1.h[2], w9
; CHECK-NEXT:    mov v0.h[3], w10
; CHECK-NEXT:    mov v1.h[3], w11
; CHECK-NEXT:    shl v1.4h, v1.4h, #8
; CHECK-NEXT:    shl v0.4h, v0.4h, #8
; CHECK-NEXT:    uqadd v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    ushr v0.4h, v0.4h, #8
; CHECK-NEXT:    xtn v0.8b, v0.8h
; CHECK-NEXT:    str s0, [x2]
; CHECK-NEXT:    ret
  %x = load <4 x i8>, <4 x i8>* %px
  %y = load <4 x i8>, <4 x i8>* %py
  %z = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
  store <4 x i8> %z, <4 x i8>* %pz
  ret void
}

; i8 elements promoted to i32 lanes: shift left by 24 so uqadd saturates at
; the i8 boundary, then shift back and store lane by lane.
define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
; CHECK-LABEL: v2i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldrb w8, [x0]
; CHECK-NEXT:    ldrb w9, [x1]
; CHECK-NEXT:    ldrb w10, [x0, #1]
; CHECK-NEXT:    ldrb w11, [x1, #1]
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    fmov s1, w9
; CHECK-NEXT:    mov v0.s[1], w10
; CHECK-NEXT:    mov v1.s[1], w11
; CHECK-NEXT:    shl v1.2s, v1.2s, #24
; CHECK-NEXT:    shl v0.2s, v0.2s, #24
; CHECK-NEXT:    uqadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ushr v0.2s, v0.2s, #24
; CHECK-NEXT:    mov w8, v0.s[1]
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    strb w9, [x2]
; CHECK-NEXT:    strb w8, [x2, #1]
; CHECK-NEXT:    ret
  %x = load <2 x i8>, <2 x i8>* %px
  %y = load <2 x i8>, <2 x i8>* %py
  %z = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
  store <2 x i8> %z, <2 x i8>* %pz
  ret void
}

; 64-bit subregister case for i16 lanes: d-register load/uqadd/store.
define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
; CHECK-LABEL: v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    ldr d1, [x1]
; CHECK-NEXT:    uqadd v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    str d0, [x2]
; CHECK-NEXT:    ret
  %x = load <4 x i16>, <4 x i16>* %px
  %y = load <4 x i16>, <4 x i16>* %py
  %z = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
  store <4 x i16> %z, <4 x i16>* %pz
  ret void
}

; i16 elements promoted to i32 lanes: shift left by 16 so uqadd saturates at
; the i16 boundary, then shift back and store lane by lane.
define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
; CHECK-LABEL: v2i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldrh w8, [x0]
; CHECK-NEXT:    ldrh w9, [x1]
; CHECK-NEXT:    ldrh w10, [x0, #2]
; CHECK-NEXT:    ldrh w11, [x1, #2]
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    fmov s1, w9
; CHECK-NEXT:    mov v0.s[1], w10
; CHECK-NEXT:    mov v1.s[1], w11
; CHECK-NEXT:    shl v1.2s, v1.2s, #16
; CHECK-NEXT:    shl v0.2s, v0.2s, #16
; CHECK-NEXT:    uqadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ushr v0.2s, v0.2s, #16
; CHECK-NEXT:    mov w8, v0.s[1]
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    strh w9, [x2]
; CHECK-NEXT:    strh w8, [x2, #2]
; CHECK-NEXT:    ret
  %x = load <2 x i16>, <2 x i16>* %px
  %y = load <2 x i16>, <2 x i16>* %py
  %z = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
  store <2 x i16> %z, <2 x i16>* %pz
  ret void
}

; Non-power-of-two count widened to 16 lanes; a single uqadd still suffices.
define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
; CHECK-LABEL: v12i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %z = call <12 x i8> @llvm.uadd.sat.v12i8(<12 x i8> %x, <12 x i8> %y)
  ret <12 x i8> %z
}

; 12 x i16 splits into a full q register plus a half: q store then d store.
define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
; CHECK-LABEL: v12i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldp q0, q1, [x0]
; CHECK-NEXT:    ldp q3, q2, [x1]
; CHECK-NEXT:    uqadd v1.8h, v1.8h, v2.8h
; CHECK-NEXT:    uqadd v0.8h, v0.8h, v3.8h
; CHECK-NEXT:    str q0, [x2]
; CHECK-NEXT:    str d1, [x2, #16]
; CHECK-NEXT:    ret
  %x = load <12 x i16>, <12 x i16>* %px
  %y = load <12 x i16>, <12 x i16>* %py
  %z = call <12 x i16> @llvm.uadd.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
  store <12 x i16> %z, <12 x i16>* %pz
  ret void
}

; Single-element vector: scalar byte load into a vector reg, lane-0 store.
define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
; CHECK-LABEL: v1i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr b0, [x0]
; CHECK-NEXT:    ldr b1, [x1]
; CHECK-NEXT:    uqadd v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    st1 { v0.b }[0], [x2]
; CHECK-NEXT:    ret
  %x = load <1 x i8>, <1 x i8>* %px
  %y = load <1 x i8>, <1 x i8>* %py
  %z = call <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
  store <1 x i8> %z, <1 x i8>* %pz
  ret void
}

; Single-element i16 vector: scalar halfword load/store around uqadd.
define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
; CHECK-LABEL: v1i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr h0, [x0]
; CHECK-NEXT:    ldr h1, [x1]
; CHECK-NEXT:    uqadd v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    str h0, [x2]
; CHECK-NEXT:    ret
  %x = load <1 x i16>, <1 x i16>* %px
  %y = load <1 x i16>, <1 x i16>* %py
  %z = call <1 x i16> @llvm.uadd.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
  store <1 x i16> %z, <1 x i16>* %pz
  ret void
}

; i4 lanes in i8 registers: mask to 4 bits, shift to the top of the byte so
; uqadd saturates at the i4 boundary, then shift back down.
define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) nounwind {
; CHECK-LABEL: v16i4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.16b, #15
; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
; CHECK-NEXT:    shl v1.16b, v1.16b, #4
; CHECK-NEXT:    shl v0.16b, v0.16b, #4
; CHECK-NEXT:    uqadd v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ushr v0.16b, v0.16b, #4
; CHECK-NEXT:    ret
  %z = call <16 x i4> @llvm.uadd.sat.v16i4(<16 x i4> %x, <16 x i4> %y)
  ret <16 x i4> %z
}

; i1 lanes in i8 registers: mask to 1 bit, shift to bit 7 so uqadd saturates
; at the i1 boundary, then shift back down.
define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) nounwind {
; CHECK-LABEL: v16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.16b, #1
; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
; CHECK-NEXT:    shl v1.16b, v1.16b, #7
; CHECK-NEXT:    shl v0.16b, v0.16b, #7
; CHECK-NEXT:    uqadd v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ushr v0.16b, v0.16b, #7
; CHECK-NEXT:    ret
  %z = call <16 x i1> @llvm.uadd.sat.v16i1(<16 x i1> %x, <16 x i1> %y)
  ret <16 x i1> %z
}

; 64-bit vector of i32 lanes: single uqadd on the d-register form.
define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
; CHECK-LABEL: v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %z = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
  ret <2 x i32> %z
}

; One full 128-bit register of i32 lanes: single uqadd.
define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %z = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %z
}

; Two 128-bit registers of i32 lanes: two uqadd instructions.
define <8 x i32> @v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
; CHECK-LABEL: v8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    uqadd v1.4s, v1.4s, v3.4s
; CHECK-NEXT:    ret
  %z = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %x, <8 x i32> %y)
  ret <8 x i32> %z
}

; Four 128-bit registers of i32 lanes: four uqadd instructions.
define <16 x i32> @v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
; CHECK-LABEL: v16i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.4s, v0.4s, v4.4s
; CHECK-NEXT:    uqadd v1.4s, v1.4s, v5.4s
; CHECK-NEXT:    uqadd v2.4s, v2.4s, v6.4s
; CHECK-NEXT:    uqadd v3.4s, v3.4s, v7.4s
; CHECK-NEXT:    ret
  %z = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %x, <16 x i32> %y)
  ret <16 x i32> %z
}

; One full 128-bit register of i64 lanes: single uqadd.
define <2 x i64> @v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
; CHECK-LABEL: v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %z = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
  ret <2 x i64> %z
}

; Two 128-bit registers of i64 lanes: two uqadd instructions.
define <4 x i64> @v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: v4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.2d, v0.2d, v2.2d
; CHECK-NEXT:    uqadd v1.2d, v1.2d, v3.2d
; CHECK-NEXT:    ret
  %z = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %x, <4 x i64> %y)
  ret <4 x i64> %z
}

; Four 128-bit registers of i64 lanes: four uqadd instructions.
define <8 x i64> @v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK-LABEL: v8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd v0.2d, v0.2d, v4.2d
; CHECK-NEXT:    uqadd v1.2d, v1.2d, v5.2d
; CHECK-NEXT:    uqadd v2.2d, v2.2d, v6.2d
; CHECK-NEXT:    uqadd v3.2d, v3.2d, v7.2d
; CHECK-NEXT:    ret
  %z = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %x, <8 x i64> %y)
  ret <8 x i64> %z
}

; i128 elements are wider than any NEON lane: expanded to scalar adds/adcs
; pairs with an overflow check, selecting all-ones (csinv) on saturation.
define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-LABEL: v2i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x8, x2, x6
; CHECK-NEXT:    adcs x9, x3, x7
; CHECK-NEXT:    cmp x8, x2
; CHECK-NEXT:    cset w10, lo
; CHECK-NEXT:    cmp x9, x3
; CHECK-NEXT:    cset w11, lo
; CHECK-NEXT:    csel w10, w10, w11, eq
; CHECK-NEXT:    cmp w10, #0 // =0
; CHECK-NEXT:    csinv x3, x9, xzr, eq
; CHECK-NEXT:    csinv x2, x8, xzr, eq
; CHECK-NEXT:    adds x8, x0, x4
; CHECK-NEXT:    adcs x9, x1, x5
; CHECK-NEXT:    cmp x8, x0
; CHECK-NEXT:    cset w10, lo
; CHECK-NEXT:    cmp x9, x1
; CHECK-NEXT:    cset w11, lo
; CHECK-NEXT:    csel w10, w10, w11, eq
; CHECK-NEXT:    cmp w10, #0 // =0
; CHECK-NEXT:    csinv x8, x8, xzr, eq
; CHECK-NEXT:    csinv x1, x9, xzr, eq
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    mov v0.d[1], x1
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %z = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %x, <2 x i128> %y)
  ret <2 x i128> %z
}
