; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

;
; SHRNB
;

define <vscale x 16 x i8> @shrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: shrnb_h:
; CHECK: shrnb z0.b, z0.h, #8
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                 i32 8)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @shrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: shrnb_s:
; CHECK: shrnb z0.h, z0.s, #16
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                 i32 16)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @shrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: shrnb_d:
; CHECK: shrnb z0.s, z0.d, #32
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                 i32 32)
  ret <vscale x 4 x i32> %out
}
37
;
; UQSHRNB
;

define <vscale x 16 x i8> @uqshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqshrnb_h:
; CHECK: uqshrnb z0.b, z0.h, #1
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                   i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqshrnb_s:
; CHECK: uqshrnb z0.h, z0.s, #1
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                   i32 1)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqshrnb_d:
; CHECK: uqshrnb z0.s, z0.d, #1
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                   i32 1)
  ret <vscale x 4 x i32> %out
}
68
;
; SQSHRNB
;

define <vscale x 16 x i8> @sqshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqshrnb_h:
; CHECK: sqshrnb z0.b, z0.h, #1
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                   i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqshrnb_s:
; CHECK: sqshrnb z0.h, z0.s, #1
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                   i32 1)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqshrnb_d:
; CHECK: sqshrnb z0.s, z0.d, #1
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                   i32 1)
  ret <vscale x 4 x i32> %out
}
99
;
; SQSHRUNB
;

define <vscale x 16 x i8> @sqshrunb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqshrunb_h:
; CHECK: sqshrunb z0.b, z0.h, #7
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunb.nxv8i16(<vscale x 8 x i16> %a,
                                                                    i32 7)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrunb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqshrunb_s:
; CHECK: sqshrunb z0.h, z0.s, #15
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunb.nxv4i32(<vscale x 4 x i32> %a,
                                                                    i32 15)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrunb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqshrunb_d:
; CHECK: sqshrunb z0.s, z0.d, #31
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunb.nxv2i64(<vscale x 2 x i64> %a,
                                                                    i32 31)
  ret <vscale x 4 x i32> %out
}
130
;
; UQRSHRNB
;

define <vscale x 16 x i8> @uqrshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqrshrnb_h:
; CHECK: uqrshrnb z0.b, z0.h, #2
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                    i32 2)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqrshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqrshrnb_s:
; CHECK: uqrshrnb z0.h, z0.s, #2
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                    i32 2)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqrshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqrshrnb_d:
; CHECK: uqrshrnb z0.s, z0.d, #2
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                    i32 2)
  ret <vscale x 4 x i32> %out
}
161
;
; SQRSHRNB
;

define <vscale x 16 x i8> @sqrshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqrshrnb_h:
; CHECK: sqrshrnb z0.b, z0.h, #2
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                    i32 2)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqrshrnb_s:
; CHECK: sqrshrnb z0.h, z0.s, #2
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                    i32 2)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqrshrnb_d:
; CHECK: sqrshrnb z0.s, z0.d, #2
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                    i32 2)
  ret <vscale x 4 x i32> %out
}
192
;
; SQRSHRUNB
;

define <vscale x 16 x i8> @sqrshrunb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqrshrunb_h:
; CHECK: sqrshrunb z0.b, z0.h, #6
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunb.nxv8i16(<vscale x 8 x i16> %a,
                                                                     i32 6)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrunb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqrshrunb_s:
; CHECK: sqrshrunb z0.h, z0.s, #14
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunb.nxv4i32(<vscale x 4 x i32> %a,
                                                                     i32 14)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrunb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqrshrunb_d:
; CHECK: sqrshrunb z0.s, z0.d, #30
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunb.nxv2i64(<vscale x 2 x i64> %a,
                                                                     i32 30)
  ret <vscale x 4 x i32> %out
}
223
;
; SHRNT
;

define <vscale x 16 x i8> @shrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: shrnt_h:
; CHECK: shrnt z0.b, z1.h, #3
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                 <vscale x 8 x i16> %b,
                                                                 i32 3)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @shrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: shrnt_s:
; CHECK: shrnt z0.h, z1.s, #3
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                 <vscale x 4 x i32> %b,
                                                                 i32 3)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @shrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: shrnt_d:
; CHECK: shrnt z0.s, z1.d, #3
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                 <vscale x 2 x i64> %b,
                                                                 i32 3)
  ret <vscale x 4 x i32> %out
}
257
;
; UQSHRNT
;

define <vscale x 16 x i8> @uqshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqshrnt_h:
; CHECK: uqshrnt z0.b, z1.h, #5
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                   <vscale x 8 x i16> %b,
                                                                   i32 5)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqshrnt_s:
; CHECK: uqshrnt z0.h, z1.s, #13
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                   <vscale x 4 x i32> %b,
                                                                   i32 13)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqshrnt_d:
; CHECK: uqshrnt z0.s, z1.d, #29
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b,
                                                                   i32 29)
  ret <vscale x 4 x i32> %out
}
291
;
; SQSHRNT
;

define <vscale x 16 x i8> @sqshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqshrnt_h:
; CHECK: sqshrnt z0.b, z1.h, #5
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                   <vscale x 8 x i16> %b,
                                                                   i32 5)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqshrnt_s:
; CHECK: sqshrnt z0.h, z1.s, #13
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                   <vscale x 4 x i32> %b,
                                                                   i32 13)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqshrnt_d:
; CHECK: sqshrnt z0.s, z1.d, #29
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b,
                                                                   i32 29)
  ret <vscale x 4 x i32> %out
}
325
;
; SQSHRUNT
;

define <vscale x 16 x i8> @sqshrunt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqshrunt_h:
; CHECK: sqshrunt z0.b, z1.h, #4
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunt.nxv8i16(<vscale x 16 x i8> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    i32 4)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrunt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqshrunt_s:
; CHECK: sqshrunt z0.h, z1.s, #4
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunt.nxv4i32(<vscale x 8 x i16> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    i32 4)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrunt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqshrunt_d:
; CHECK: sqshrunt z0.s, z1.d, #4
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunt.nxv2i64(<vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    i32 4)
  ret <vscale x 4 x i32> %out
}
359
;
; UQRSHRNT
;

define <vscale x 16 x i8> @uqrshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqrshrnt_h:
; CHECK: uqrshrnt z0.b, z1.h, #8
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    i32 8)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqrshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqrshrnt_s:
; CHECK: uqrshrnt z0.h, z1.s, #12
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    i32 12)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqrshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqrshrnt_d:
; CHECK: uqrshrnt z0.s, z1.d, #28
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    i32 28)
  ret <vscale x 4 x i32> %out
}
393
;
; SQRSHRNT
;

define <vscale x 16 x i8> @sqrshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqrshrnt_h:
; CHECK: sqrshrnt z0.b, z1.h, #8
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    i32 8)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqrshrnt_s:
; CHECK: sqrshrnt z0.h, z1.s, #12
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    i32 12)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqrshrnt_d:
; CHECK: sqrshrnt z0.s, z1.d, #28
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    i32 28)
  ret <vscale x 4 x i32> %out
}
427
;
; SQRSHRUNT
;

define <vscale x 16 x i8> @sqrshrunt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqrshrunt_h:
; CHECK: sqrshrunt z0.b, z1.h, #1
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunt.nxv8i16(<vscale x 16 x i8> %a,
                                                                     <vscale x 8 x i16> %b,
                                                                     i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrunt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqrshrunt_s:
; CHECK: sqrshrunt z0.h, z1.s, #5
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunt.nxv4i32(<vscale x 8 x i16> %a,
                                                                     <vscale x 4 x i32> %b,
                                                                     i32 5)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrunt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqrshrunt_d:
; CHECK: sqrshrunt z0.s, z1.d, #5
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunt.nxv2i64(<vscale x 4 x i32> %a,
                                                                     <vscale x 2 x i64> %b,
                                                                     i32 5)
  ret <vscale x 4 x i32> %out
}
461
declare <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.shrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.shrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.shrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)