; Grep listing ("Lines Matching full:x") extracted from an LLVM IR test for
; AArch64 SVE integer intrinsics (smax/smin/umax/umin, sqadd/sqsub/uqadd/uqsub,
; asr/lsl) with splatted-immediate operands. RUN / CHECK / declare lines were
; stripped by the extraction.
; SMAX with a splatted constant operand. In-range constants should select the
; immediate form; "out_of_range" variants should not (per the function names —
; the original CHECK lines were stripped by the extraction; TODO confirm).

define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

; 129 is outside the signed 8-bit immediate range [-128, 127].
define <vscale x 8 x i16> @smax_i16_out_of_range(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 129, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 -128, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

; -129 is outside the signed 8-bit immediate range.
define <vscale x 4 x i32> @smax_i32_out_of_range(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 -129, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

; 65535 is outside the signed 8-bit immediate range.
define <vscale x 2 x i64> @smax_i64_out_of_range(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}
; SMIN with a splatted constant operand; same in-range / out-of-range scheme
; as the SMAX tests above (CHECK lines stripped by the extraction).

define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a) {
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 -128, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

; -129 is outside the signed 8-bit immediate range [-128, 127].
define <vscale x 8 x i16> @smin_i16_out_of_range(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 -129, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

; 257 is outside the signed 8-bit immediate range.
define <vscale x 4 x i32> @smin_i32_out_of_range(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 -128, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

; -256 is outside the signed 8-bit immediate range.
define <vscale x 2 x i64> @smin_i64_out_of_range(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 -256, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}
; UMAX with a splatted constant operand; unsigned 8-bit immediate range
; [0, 255] per the in-range values used below (CHECK lines stripped).

define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a) {
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 0, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

; 257 is outside the unsigned 8-bit immediate range [0, 255].
define <vscale x 8 x i16> @umax_i16_out_of_range(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 0, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

; 257 is outside the unsigned 8-bit immediate range.
define <vscale x 4 x i32> @umax_i32_out_of_range(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

; 65535 is outside the unsigned 8-bit immediate range.
define <vscale x 2 x i64> @umax_i64_out_of_range(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}
; UMIN with a splatted constant operand; same unsigned-range scheme as the
; UMAX tests above (CHECK lines stripped by the extraction).

define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a) {
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 255, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

; 257 is outside the unsigned 8-bit immediate range [0, 255].
define <vscale x 8 x i16> @umin_i16_out_of_range(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

; 257 is outside the unsigned 8-bit immediate range.
define <vscale x 4 x i32> @umin_i32_out_of_range(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 0, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

; 65535 is outside the unsigned 8-bit immediate range.
define <vscale x 2 x i64> @umin_i64_out_of_range(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}
; SQADD (unpredicated, .x form) with splatted constants. "lowimm" uses an
; 8-bit value; "highimm" uses a value representable as an 8-bit immediate
; shifted left by 8 (2048 = 8<<8, 8192 = 32<<8, 65280 = 255<<8).

define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}
; SQSUB (unpredicated, .x form) with splatted constants; mirrors the SQADD
; tests above with identical immediates.

define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}
; UQADD / UQSUB (unpredicated, .x form) with splatted constants; same
; lowimm/highimm scheme as SQADD/SQSUB. Function order preserved from the
; original file, where the remaining uqadd cases follow the uqsub group.

define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}
; ASR (predicated) by a splatted constant shift amount. "all_active" variants
; build an all-true predicate with ptrue(31); "too_small" variants shift by
; zero (below the instruction's minimum shift immediate of 1, per the names —
; CHECK lines stripped by the extraction).

define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
  %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @asr_i8_all_active(<vscale x 16 x i8> %a) {
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @asr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @asr_i16_all_active(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @asr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @asr_i32_all_active(<vscale x 4 x i32> %a) {
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @asr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
  %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @asr_i64_all_active(<vscale x 2 x i64> %a) {
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @asr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i64> %out
}
; LSL (predicated) by a splatted constant shift amount. "too_big" variants use
; a shift equal to the element width (above the instruction's maximum shift
; immediate, per the names); "zero" variants shift by zero. CHECK lines were
; stripped by the extraction. (@lsl_i32_all_active was cut off mid-function by
; the extraction and is not reconstructed here.)

define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @lsl_i8_all_active(<vscale x 16 x i8> %a) {
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @lsl_i8_too_big(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @lsl_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @lsl_i16_all_active(<vscale x 8 x i16> %a) {
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @lsl_i16_too_big(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @lsl_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}
1073 define <vscale x 4 x i32> @lsl_i32_all_active(<vscale x 4 x i32> %a) {
1078 %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
1079 %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
1080 …%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroi…
1081 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
1082 <vscale x 4 x i32> %a,
1083 <vscale x 4 x i32> %splat)
1084 ret <vscale x 4 x i32> %out
1088 define <vscale x 4 x i32> @lsl_i32_too_big(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1094 %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
1095 …%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroi…
1096 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
1097 <vscale x 4 x i32> %a,
1098 <vscale x 4 x i32> %splat)
1099 ret <vscale x 4 x i32> %out
1102 define <vscale x 4 x i32> @lsl_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1107 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
1108 <vscale x 4 x i32> %a,
1109 <vscale x 4 x i32> zeroinitializer)
1110 ret <vscale x 4 x i32> %out
1113 define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1118 %elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
1119 …%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroi…
1120 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
1121 <vscale x 2 x i64> %a,
1122 <vscale x 2 x i64> %splat)
1123 ret <vscale x 2 x i64> %out
1126 define <vscale x 2 x i64> @lsl_i64_all_active(<vscale x 2 x i64> %a) {
1131 %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
1132 %elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
1133 …%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroi…
1134 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
1135 <vscale x 2 x i64> %a,
1136 <vscale x 2 x i64> %splat)
1137 ret <vscale x 2 x i64> %out
1141 define <vscale x 2 x i64> @lsl_i64_too_big(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1147 %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
1148 …%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroi…
1149 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
1150 <vscale x 2 x i64> %a,
1151 <vscale x 2 x i64> %splat)
1152 ret <vscale x 2 x i64> %out
1155 define <vscale x 2 x i64> @lsl_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1160 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
1161 <vscale x 2 x i64> %a,
1162 <vscale x 2 x i64> zeroinitializer)
1163 ret <vscale x 2 x i64> %out
1168 define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1173 %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
1174 …%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zero…
1175 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
1176 <vscale x 16 x i8> %a,
1177 <vscale x 16 x i8> %splat)
1178 ret <vscale x 16 x i8> %out
1181 define <vscale x 16 x i8> @lsr_i8_all_active(<vscale x 16 x i8> %a) {
1186 %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
1187 %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
1188 …%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zero…
1189 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
1190 <vscale x 16 x i8> %a,
1191 <vscale x 16 x i8> %splat)
1192 ret <vscale x 16 x i8> %out
1196 define <vscale x 16 x i8> @lsr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1202 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
1203 <vscale x 16 x i8> %a,
1204 <vscale x 16 x i8> zeroinitializer)
1205 ret <vscale x 16 x i8> %out
1208 define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1213 %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
1214 …%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroi…
1215 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
1216 <vscale x 8 x i16> %a,
1217 <vscale x 8 x i16> %splat)
1218 ret <vscale x 8 x i16> %out
1221 define <vscale x 8 x i16> @lsr_i16_all_active(<vscale x 8 x i16> %a) {
1226 %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
1227 %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
1228 …%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroi…
1229 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
1230 <vscale x 8 x i16> %a,
1231 <vscale x 8 x i16> %splat)
1232 ret <vscale x 8 x i16> %out
1236 define <vscale x 8 x i16> @lsr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1242 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
1243 <vscale x 8 x i16> %a,
1244 <vscale x 8 x i16> zeroinitializer)
1245 ret <vscale x 8 x i16> %out
1248 define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1253 %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
1254 …%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroi…
1255 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
1256 <vscale x 4 x i32> %a,
1257 <vscale x 4 x i32> %splat)
1258 ret <vscale x 4 x i32> %out
1261 define <vscale x 4 x i32> @lsr_i32_all_active(<vscale x 4 x i32> %a) {
1266 %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
1267 %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
1268 …%splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroi…
1269 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
1270 <vscale x 4 x i32> %a,
1271 <vscale x 4 x i32> %splat)
1272 ret <vscale x 4 x i32> %out
1276 define <vscale x 4 x i32> @lsr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1282 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
1283 <vscale x 4 x i32> %a,
1284 <vscale x 4 x i32> zeroinitializer)
1285 ret <vscale x 4 x i32> %out
1288 define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1293 %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
1294 …%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroi…
1295 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
1296 <vscale x 2 x i64> %a,
1297 <vscale x 2 x i64> %splat)
1298 ret <vscale x 2 x i64> %out
1301 define <vscale x 2 x i64> @lsr_i64_all_active(<vscale x 2 x i64> %a) {
1306 %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
1307 %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
1308 …%splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroi…
1309 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
1310 <vscale x 2 x i64> %a,
1311 <vscale x 2 x i64> %splat)
1312 ret <vscale x 2 x i64> %out
1316 define <vscale x 2 x i64> @lsr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1322 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
1323 <vscale x 2 x i64> %a,
1324 <vscale x 2 x i64> zeroinitializer)
1325 ret <vscale x 2 x i64> %out
1328 declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
1329 declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
1330 declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
1331 declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
1333 declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
1334 declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
1335 declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
1336 declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
1338 declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
1339 declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
1340 declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
1341 declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
1343 declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
1344 declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
1345 declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
1346 declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
1348 declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <…
1349 declare <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <v…
1350 declare <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <v…
1351 declare <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <v…
1353 declare <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <…
1354 declare <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <v…
1355 declare <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <v…
1356 declare <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <v…
1358 declare <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <…
1359 declare <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <v…
1360 declare <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <v…
1361 declare <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <v…
1363 declare <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <…
1364 declare <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <v…
1365 declare <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <v…
1366 declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <v…
1368 declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <v…
1369 declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vs…
1370 declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vs…
1371 declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vs…
1373 declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <v…
1374 declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vs…
1375 declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vs…
1376 declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vs…
1378 declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <v…
1379 declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vs…
1380 declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vs…
1381 declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vs…
1383 declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
1384 declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 %pattern)
1385 declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)
1386 declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 %pattern)