// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t

// If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
// ASM-NOT: warning
9 #include <arm_sve.h>
10 
11 #ifdef SVE_OVERLOADED_FORMS
12 // A simple used,unused... macro, long enough to represent any SVE builtin.
13 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
14 #else
15 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
16 #endif
17 
svint8_t test_svsubr_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsubr_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s8,_z,)(pg, op1, op2);
}
26 
svint16_t test_svsubr_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsubr_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s16,_z,)(pg, op1, op2);
}
36 
svint32_t test_svsubr_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsubr_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s32,_z,)(pg, op1, op2);
}
46 
svint64_t test_svsubr_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsubr_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s64,_z,)(pg, op1, op2);
}
56 
svuint8_t test_svsubr_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsubr_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u8,_z,)(pg, op1, op2);
}
65 
svuint16_t test_svsubr_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsubr_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u16,_z,)(pg, op1, op2);
}
75 
svuint32_t test_svsubr_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsubr_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u32,_z,)(pg, op1, op2);
}
85 
svuint64_t test_svsubr_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsubr_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u64,_z,)(pg, op1, op2);
}
95 
svint8_t test_svsubr_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsubr_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s8,_m,)(pg, op1, op2);
}
103 
svint16_t test_svsubr_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsubr_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s16,_m,)(pg, op1, op2);
}
112 
svint32_t test_svsubr_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsubr_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s32,_m,)(pg, op1, op2);
}
121 
svint64_t test_svsubr_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsubr_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s64,_m,)(pg, op1, op2);
}
130 
svuint8_t test_svsubr_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsubr_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u8,_m,)(pg, op1, op2);
}
138 
svuint16_t test_svsubr_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsubr_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u16,_m,)(pg, op1, op2);
}
147 
svuint32_t test_svsubr_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsubr_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u32,_m,)(pg, op1, op2);
}
156 
svuint64_t test_svsubr_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsubr_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u64,_m,)(pg, op1, op2);
}
165 
svint8_t test_svsubr_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsubr_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s8,_x,)(pg, op1, op2);
}
173 
svint16_t test_svsubr_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsubr_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s16,_x,)(pg, op1, op2);
}
182 
svint32_t test_svsubr_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsubr_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s32,_x,)(pg, op1, op2);
}
191 
svint64_t test_svsubr_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsubr_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s64,_x,)(pg, op1, op2);
}
200 
svuint8_t test_svsubr_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsubr_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u8,_x,)(pg, op1, op2);
}
208 
svuint16_t test_svsubr_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsubr_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u16,_x,)(pg, op1, op2);
}
217 
svuint32_t test_svsubr_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsubr_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u32,_x,)(pg, op1, op2);
}
226 
svuint64_t test_svsubr_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsubr_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u64,_x,)(pg, op1, op2);
}
235 
svint8_t test_svsubr_n_s8_z(svbool_t pg, svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s8,_z,)(pg, op1, op2);
}
245 
svint16_t test_svsubr_n_s16_z(svbool_t pg, svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s16,_z,)(pg, op1, op2);
}
256 
svint32_t test_svsubr_n_s32_z(svbool_t pg, svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s32,_z,)(pg, op1, op2);
}
267 
svint64_t test_svsubr_n_s64_z(svbool_t pg, svint64_t op1, int64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s64,_z,)(pg, op1, op2);
}
278 
svuint8_t test_svsubr_n_u8_z(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u8,_z,)(pg, op1, op2);
}
288 
svuint16_t test_svsubr_n_u16_z(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u16,_z,)(pg, op1, op2);
}
299 
svuint32_t test_svsubr_n_u32_z(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u32,_z,)(pg, op1, op2);
}
310 
svuint64_t test_svsubr_n_u64_z(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u64,_z,)(pg, op1, op2);
}
321 
svint8_t test_svsubr_n_s8_m(svbool_t pg, svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s8,_m,)(pg, op1, op2);
}
330 
svint16_t test_svsubr_n_s16_m(svbool_t pg, svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s16,_m,)(pg, op1, op2);
}
340 
svint32_t test_svsubr_n_s32_m(svbool_t pg, svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s32,_m,)(pg, op1, op2);
}
350 
svint64_t test_svsubr_n_s64_m(svbool_t pg, svint64_t op1, int64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s64,_m,)(pg, op1, op2);
}
360 
svuint8_t test_svsubr_n_u8_m(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u8,_m,)(pg, op1, op2);
}
369 
svuint16_t test_svsubr_n_u16_m(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u16,_m,)(pg, op1, op2);
}
379 
svuint32_t test_svsubr_n_u32_m(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u32,_m,)(pg, op1, op2);
}
389 
svuint64_t test_svsubr_n_u64_m(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u64,_m,)(pg, op1, op2);
}
399 
svint8_t test_svsubr_n_s8_x(svbool_t pg, svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s8,_x,)(pg, op1, op2);
}
408 
svint16_t test_svsubr_n_s16_x(svbool_t pg, svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s16,_x,)(pg, op1, op2);
}
418 
svint32_t test_svsubr_n_s32_x(svbool_t pg, svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s32,_x,)(pg, op1, op2);
}
428 
svint64_t test_svsubr_n_s64_x(svbool_t pg, svint64_t op1, int64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_s64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_s64,_x,)(pg, op1, op2);
}
438 
svuint8_t test_svsubr_n_u8_x(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u8,_x,)(pg, op1, op2);
}
447 
svuint16_t test_svsubr_n_u16_x(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u16,_x,)(pg, op1, op2);
}
457 
// _n_x form, u32: scalar splat via sve.dup.x plus predicate narrowing to nxv4i1.
svuint32_t test_svsubr_n_u32_x(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u32,_x,)(pg, op1, op2);
}
467 
// _n_x form, u64: scalar splat via sve.dup.x plus predicate narrowing to nxv2i1.
svuint64_t test_svsubr_n_u64_x(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_u64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_u64,_x,)(pg, op1, op2);
}
477 
// _z (zeroing) form, f16: op1 is first zeroed in inactive lanes via sve.sel
// against zeroinitializer, then sve.fsubr is applied with the narrowed predicate.
svfloat16_t test_svsubr_f16_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svsubr_f16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.sel.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[SEL]], <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f16,_z,)(pg, op1, op2);
}
487 
// _z (zeroing) form, f32: sve.sel zeroes inactive lanes of op1 before sve.fsubr.
svfloat32_t test_svsubr_f32_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svsubr_f32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.sel.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[SEL]], <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f32,_z,)(pg, op1, op2);
}
497 
// _z (zeroing) form, f64: sve.sel zeroes inactive lanes of op1 before sve.fsubr.
svfloat64_t test_svsubr_f64_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svsubr_f64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[SEL]], <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f64,_z,)(pg, op1, op2);
}
507 
// _m (merging) form, f16: sve.fsubr is called directly on op1; inactive lanes
// keep op1's values, so no sel/zeroing is expected in the IR.
svfloat16_t test_svsubr_f16_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svsubr_f16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f16,_m,)(pg, op1, op2);
}
516 
// _m (merging) form, f32: direct sve.fsubr call, no sel/zeroing expected.
svfloat32_t test_svsubr_f32_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svsubr_f32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f32,_m,)(pg, op1, op2);
}
525 
// _m (merging) form, f64: direct sve.fsubr call, no sel/zeroing expected.
svfloat64_t test_svsubr_f64_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svsubr_f64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f64,_m,)(pg, op1, op2);
}
534 
// _x (don't-care) form, f16: lowers to the same IR shape as _m — a direct
// sve.fsubr call on the narrowed predicate.
svfloat16_t test_svsubr_f16_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svsubr_f16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f16,_x,)(pg, op1, op2);
}
543 
// _x (don't-care) form, f32: direct sve.fsubr call on the narrowed predicate.
svfloat32_t test_svsubr_f32_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svsubr_f32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f32,_x,)(pg, op1, op2);
}
552 
// _x (don't-care) form, f64: direct sve.fsubr call on the narrowed predicate.
svfloat64_t test_svsubr_f64_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svsubr_f64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_f64,_x,)(pg, op1, op2);
}
561 
// _n_z form, f16: scalar op2 is splatted via sve.dup.x, inactive lanes of op1
// are zeroed via sve.sel, then sve.fsubr combines them.
svfloat16_t test_svsubr_n_f16_z(svbool_t pg, svfloat16_t op1, float16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.sel.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[SEL]], <vscale x 8 x half> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f16,_z,)(pg, op1, op2);
}
572 
// _n_z form, f32: scalar splat (sve.dup.x) + zeroing of op1 (sve.sel) + sve.fsubr.
svfloat32_t test_svsubr_n_f32_z(svbool_t pg, svfloat32_t op1, float32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.sel.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[SEL]], <vscale x 4 x float> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f32,_z,)(pg, op1, op2);
}
583 
// _n_z form, f64: scalar splat (sve.dup.x) + zeroing of op1 (sve.sel) + sve.fsubr.
svfloat64_t test_svsubr_n_f64_z(svbool_t pg, svfloat64_t op1, float64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[SEL]], <vscale x 2 x double> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f64,_z,)(pg, op1, op2);
}
594 
// _n_m form, f16: scalar splat (sve.dup.x) then merging sve.fsubr — no sel.
svfloat16_t test_svsubr_n_f16_m(svbool_t pg, svfloat16_t op1, float16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f16,_m,)(pg, op1, op2);
}
604 
// _n_m form, f32: scalar splat (sve.dup.x) then merging sve.fsubr — no sel.
svfloat32_t test_svsubr_n_f32_m(svbool_t pg, svfloat32_t op1, float32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f32,_m,)(pg, op1, op2);
}
614 
// _n_m form, f64: scalar splat (sve.dup.x) then merging sve.fsubr — no sel.
svfloat64_t test_svsubr_n_f64_m(svbool_t pg, svfloat64_t op1, float64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f64,_m,)(pg, op1, op2);
}
624 
// _n_x form, f16: scalar splat (sve.dup.x) then sve.fsubr; same IR shape as _n_m.
svfloat16_t test_svsubr_n_f16_x(svbool_t pg, svfloat16_t op1, float16_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f16,_x,)(pg, op1, op2);
}
634 
// _n_x form, f32: scalar splat (sve.dup.x) then sve.fsubr; same IR shape as _n_m.
svfloat32_t test_svsubr_n_f32_x(svbool_t pg, svfloat32_t op1, float32_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f32,_x,)(pg, op1, op2);
}
644 
// _n_x form, f64: scalar splat (sve.dup.x) then sve.fsubr; same IR shape as _n_m.
svfloat64_t test_svsubr_n_f64_x(svbool_t pg, svfloat64_t op1, float64_t op2)
{
  // CHECK-LABEL: test_svsubr_n_f64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_n_f64,_x,)(pg, op1, op2);
}
654