; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

;
; ADDP
;
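; Pairwise add: each result element is the sum of an adjacent element
; pair drawn from the two source vectors; p0/m is merging predication.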

define <vscale x 16 x i8> @addp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: addp_i8:
; CHECK: addp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @addp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: addp_i16:
; CHECK: addp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.addp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @addp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: addp_i32:
; CHECK: addp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.addp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @addp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: addp_i64:
; CHECK: addp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.addp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; FADDP
;
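; Floating-point pairwise add of adjacent element pairs.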

define <vscale x 8 x half> @faddp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: faddp_f16:
; CHECK: faddp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.faddp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @faddp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: faddp_f32:
; CHECK: faddp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.faddp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @faddp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: faddp_f64:
; CHECK: faddp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.faddp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXP
;
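; Floating-point pairwise maximum.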
88
89define <vscale x 8 x half> @fmaxp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
90; CHECK-LABEL: fmaxp_f16:
91; CHECK: fmaxp z0.h, p0/m, z0.h, z1.h
92; CHECK-NEXT: ret
93  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxp.nxv8f16(<vscale x 8 x i1> %pg,
94                                                                  <vscale x 8 x half> %a,
95                                                                  <vscale x 8 x half> %b)
96  ret <vscale x 8 x half> %out
97}
98
99define <vscale x 4 x float> @fmaxp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
100; CHECK-LABEL: fmaxp_f32:
101; CHECK: fmaxp z0.s, p0/m, z0.s, z1.s
102; CHECK-NEXT: ret
103  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxp.nxv4f32(<vscale x 4 x i1> %pg,
104                                                                   <vscale x 4 x float> %a,
105                                                                   <vscale x 4 x float> %b)
106  ret <vscale x 4 x float> %out
107}
108
109define <vscale x 2 x double> @fmaxp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
110; CHECK-LABEL: fmaxp_f64:
111; CHECK: fmaxp z0.d, p0/m, z0.d, z1.d
112; CHECK-NEXT: ret
113  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxp.nxv2f64(<vscale x 2 x i1> %pg,
114                                                                    <vscale x 2 x double> %a,
115                                                                    <vscale x 2 x double> %b)
116  ret <vscale x 2 x double> %out
117}
118
119;
120; FMAXNMP
121;
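; Floating-point pairwise maximum number: a numeric operand is
; preferred over a quiet NaN (fmaxnm semantics).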

define <vscale x 8 x half> @fmaxnmp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnmp_f16:
; CHECK: fmaxnmp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnmp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %a,
                                                                    <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnmp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnmp_f32:
; CHECK: fmaxnmp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnmp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %a,
                                                                     <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnmp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnmp_f64:
; CHECK: fmaxnmp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnmp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %a,
                                                                      <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINP
;
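; Floating-point pairwise minimum.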

define <vscale x 8 x half> @fminp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminp_f16:
; CHECK: fminp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminp_f32:
; CHECK: fminp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminp_f64:
; CHECK: fminp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNMP
;
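; Floating-point pairwise minimum number: a numeric operand is
; preferred over a quiet NaN (fminnm semantics).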

define <vscale x 8 x half> @fminnmp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnmp_f16:
; CHECK: fminnmp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnmp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %a,
                                                                    <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnmp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnmp_f32:
; CHECK: fminnmp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnmp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %a,
                                                                     <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnmp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnmp_f64:
; CHECK: fminnmp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnmp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %a,
                                                                      <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; SMAXP
;
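; Signed integer pairwise maximum.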

define <vscale x 16 x i8> @smaxp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smaxp_i8:
; CHECK: smaxp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smaxp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smaxp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smaxp_i16:
; CHECK: smaxp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smaxp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smaxp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smaxp_i32:
; CHECK: smaxp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smaxp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smaxp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smaxp_i64:
; CHECK: smaxp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smaxp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; SMINP
;
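; Signed integer pairwise minimum.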

define <vscale x 16 x i8> @sminp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sminp_i8:
; CHECK: sminp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sminp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sminp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sminp_i16:
; CHECK: sminp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sminp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sminp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sminp_i32:
; CHECK: sminp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sminp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sminp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sminp_i64:
; CHECK: sminp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sminp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; UMINP
;
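; Unsigned integer pairwise minimum.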

define <vscale x 16 x i8> @uminp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uminp_i8:
; CHECK: uminp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uminp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uminp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uminp_i16:
; CHECK: uminp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uminp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uminp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uminp_i32:
; CHECK: uminp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uminp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uminp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uminp_i64:
; CHECK: uminp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uminp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; UMAXP
;
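; Unsigned integer pairwise maximum.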

define <vscale x 16 x i8> @umaxp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umaxp_i8:
; CHECK: umaxp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umaxp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umaxp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umaxp_i16:
; CHECK: umaxp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umaxp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umaxp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umaxp_i32:
; CHECK: umaxp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umaxp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umaxp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umaxp_i64:
; CHECK: umaxp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umaxp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.addp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.addp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.addp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.addp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 8 x half> @llvm.aarch64.sve.faddp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.faddp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.faddp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnmp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnmp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnmp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnmp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnmp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnmp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smaxp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smaxp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smaxp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smaxp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sminp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sminp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sminp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sminp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umaxp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umaxp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umaxp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umaxp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uminp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uminp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uminp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uminp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)