1 // REQUIRES: aarch64-registered-target
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
3 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
4 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null 2>%t
5 // RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
6
7 // If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
8 // ASM-NOT: warning
9 #include <arm_sve.h>
10
11 #ifdef SVE_OVERLOADED_FORMS
12 // A simple used,unused... macro, long enough to represent any SVE builtin.
13 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
14 #else
15 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
16 #endif
17
// _z (zeroing): inactive lanes are zeroed by selecting op1 against zero first.
svint8_t test_sveor_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_sveor_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s8,_z,)(pg, op1, op2);
}
26
// _z on i16: predicate is first narrowed from svbool_t (nxv16i1) to nxv8i1.
svint16_t test_sveor_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_sveor_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s16,_z,)(pg, op1, op2);
}
36
// _z on i32: predicate narrowed to nxv4i1 before sel/eor.
svint32_t test_sveor_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_sveor_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s32,_z,)(pg, op1, op2);
}
46
// _z on i64: predicate narrowed to nxv2i1 before sel/eor.
svint64_t test_sveor_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_sveor_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s64,_z,)(pg, op1, op2);
}
56
// Unsigned i8 _z form; same lowering as the signed variant.
svuint8_t test_sveor_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_sveor_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u8,_z,)(pg, op1, op2);
}
65
// Unsigned i16 _z form; predicate narrowed to nxv8i1.
svuint16_t test_sveor_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_sveor_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u16,_z,)(pg, op1, op2);
}
75
// Unsigned i32 _z form; predicate narrowed to nxv4i1.
svuint32_t test_sveor_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_sveor_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u32,_z,)(pg, op1, op2);
}
85
// Unsigned i64 _z form; predicate narrowed to nxv2i1.
svuint64_t test_sveor_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_sveor_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u64,_z,)(pg, op1, op2);
}
95
// _m (merging): no sel is emitted; the eor intrinsic is called directly.
svint8_t test_sveor_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_sveor_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s8,_m,)(pg, op1, op2);
}
103
// _m on i16: only the predicate conversion precedes the eor.
svint16_t test_sveor_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_sveor_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s16,_m,)(pg, op1, op2);
}
112
// _m on i32.
svint32_t test_sveor_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_sveor_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s32,_m,)(pg, op1, op2);
}
121
// _m on i64.
svint64_t test_sveor_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_sveor_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s64,_m,)(pg, op1, op2);
}
130
// Unsigned i8 _m form.
svuint8_t test_sveor_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_sveor_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u8,_m,)(pg, op1, op2);
}
138
// Unsigned i16 _m form.
svuint16_t test_sveor_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_sveor_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u16,_m,)(pg, op1, op2);
}
147
// Unsigned i32 _m form.
svuint32_t test_sveor_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_sveor_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u32,_m,)(pg, op1, op2);
}
156
// Unsigned i64 _m form.
svuint64_t test_sveor_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_sveor_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u64,_m,)(pg, op1, op2);
}
165
// _x ("don't care"): lowers the same as _m at the IR level here.
svint8_t test_sveor_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_sveor_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s8,_x,)(pg, op1, op2);
}
173
// _x on i16.
svint16_t test_sveor_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_sveor_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s16,_x,)(pg, op1, op2);
}
182
// _x on i32.
svint32_t test_sveor_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_sveor_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s32,_x,)(pg, op1, op2);
}
191
// _x on i64.
svint64_t test_sveor_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_sveor_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s64,_x,)(pg, op1, op2);
}
200
// Unsigned i8 _x form.
svuint8_t test_sveor_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_sveor_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u8,_x,)(pg, op1, op2);
}
208
// Unsigned i16 _x form.
svuint16_t test_sveor_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_sveor_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u16,_x,)(pg, op1, op2);
}
217
// Unsigned i32 _x form.
svuint32_t test_sveor_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_sveor_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u32,_x,)(pg, op1, op2);
}
226
// Unsigned i64 _x form.
svuint64_t test_sveor_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_sveor_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u64,_x,)(pg, op1, op2);
}
235
// _n_ (vector-scalar) _z form on signed i8: op2 is splatted with dup.x and
// op1 is zero-selected before the eor.
//
// Fix: the original check block used %[[SEL]] without ever defining it
// (FileCheck errors on a use of an undefined pattern variable). Restored the
// missing CHECK-DAG line for the sve.sel-against-zeroinitializer and made the
// DUP line CHECK-DAG, mirroring the sibling test_sveor_n_u8_z.
svint8_t test_sveor_n_s8_z(svbool_t pg, svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_sveor_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s8,_z,)(pg, op1, op2);
}
244
// _n_ _z on i16: scalar op2 is splatted; predicate narrowed to nxv8i1.
svint16_t test_sveor_n_s16_z(svbool_t pg, svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_sveor_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s16,_z,)(pg, op1, op2);
}
255
// _n_ _z on i32.
svint32_t test_sveor_n_s32_z(svbool_t pg, svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_sveor_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s32,_z,)(pg, op1, op2);
}
266
// _n_ _z on i64.
svint64_t test_sveor_n_s64_z(svbool_t pg, svint64_t op1, int64_t op2)
{
  // CHECK-LABEL: test_sveor_n_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s64,_z,)(pg, op1, op2);
}
277
// _n_ _z on unsigned i8: no predicate conversion needed at this width.
svuint8_t test_sveor_n_u8_z(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_sveor_n_u8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u8,_z,)(pg, op1, op2);
}
287
// _n_ _z on unsigned i16.
svuint16_t test_sveor_n_u16_z(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_sveor_n_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u16,_z,)(pg, op1, op2);
}
298
// _n_ _z on unsigned i32.
svuint32_t test_sveor_n_u32_z(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_sveor_n_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u32,_z,)(pg, op1, op2);
}
309
// _n_ _z on unsigned i64.
svuint64_t test_sveor_n_u64_z(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_sveor_n_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u64,_z,)(pg, op1, op2);
}
320
// _n_ _m on signed i8: scalar is splatted, then a plain merging eor.
svint8_t test_sveor_n_s8_m(svbool_t pg, svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_sveor_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s8,_m,)(pg, op1, op2);
}
329
// _n_ _m on signed i16.
svint16_t test_sveor_n_s16_m(svbool_t pg, svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_sveor_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s16,_m,)(pg, op1, op2);
}
339
// _n_ _m on signed i32.
svint32_t test_sveor_n_s32_m(svbool_t pg, svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_sveor_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s32,_m,)(pg, op1, op2);
}
349
// _n_ _m on signed i64.
svint64_t test_sveor_n_s64_m(svbool_t pg, svint64_t op1, int64_t op2)
{
  // CHECK-LABEL: test_sveor_n_s64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s64,_m,)(pg, op1, op2);
}
359
// _n_ _m on unsigned i8.
svuint8_t test_sveor_n_u8_m(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_sveor_n_u8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u8,_m,)(pg, op1, op2);
}
368
// _n_ _m on unsigned i16.
svuint16_t test_sveor_n_u16_m(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_sveor_n_u16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u16,_m,)(pg, op1, op2);
}
378
// _n_ _m on unsigned i32.
svuint32_t test_sveor_n_u32_m(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_sveor_n_u32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u32,_m,)(pg, op1, op2);
}
388
// _n_ _m on unsigned i64.
svuint64_t test_sveor_n_u64_m(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_sveor_n_u64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u64,_m,)(pg, op1, op2);
}
398
// _n_ _x on signed i8.
svint8_t test_sveor_n_s8_x(svbool_t pg, svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_sveor_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s8,_x,)(pg, op1, op2);
}
407
// _n_ _x on signed i16.
svint16_t test_sveor_n_s16_x(svbool_t pg, svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_sveor_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s16,_x,)(pg, op1, op2);
}
417
// _n_ _x on signed i32.
svint32_t test_sveor_n_s32_x(svbool_t pg, svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_sveor_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s32,_x,)(pg, op1, op2);
}
427
// _n_ _x on signed i64.
svint64_t test_sveor_n_s64_x(svbool_t pg, svint64_t op1, int64_t op2)
{
  // CHECK-LABEL: test_sveor_n_s64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s64,_x,)(pg, op1, op2);
}
437
// _n_ _x on unsigned i8.
svuint8_t test_sveor_n_u8_x(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_sveor_n_u8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u8,_x,)(pg, op1, op2);
}
446
// _n_ _x on unsigned i16.
svuint16_t test_sveor_n_u16_x(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_sveor_n_u16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u16,_x,)(pg, op1, op2);
}
456
// _n_ _x on unsigned i32.
svuint32_t test_sveor_n_u32_x(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_sveor_n_u32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u32,_x,)(pg, op1, op2);
}
466
// _n_ _x on unsigned i64.
svuint64_t test_sveor_n_u64_x(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // CHECK-LABEL: test_sveor_n_u64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u64,_x,)(pg, op1, op2);
}
476
// Predicate (svbool_t) variant: maps to the zeroing eor.z predicate intrinsic.
svbool_t test_sveor_b_z(svbool_t pg, svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_sveor_b_z
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_b,_z,)(pg, op1, op2);
}
484