// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
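
// For reference: with SVE_OVERLOADED_FORMS defined, only the used arguments are
// pasted, so SVE_ACLE_FUNC(svcmla,_s8,,) expands to the overloaded name 'svcmla';
// without it, all four arguments are pasted, giving the fully suffixed name
// 'svcmla_s8'. Every call site below follows this pattern.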

svint8_t test_svcmla_s8(svint8_t op1, svint8_t op2, svint8_t op3)
{
  // CHECK-LABEL: test_svcmla_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 0)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s8'}}
  return SVE_ACLE_FUNC(svcmla,_s8,,)(op1, op2, op3, 0);
}

svint8_t test_svcmla_s8_1(svint8_t op1, svint8_t op2, svint8_t op3)
{
  // CHECK-LABEL: test_svcmla_s8_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 90)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s8'}}
  return SVE_ACLE_FUNC(svcmla,_s8,,)(op1, op2, op3, 90);
}

svint8_t test_svcmla_s8_2(svint8_t op1, svint8_t op2, svint8_t op3)
{
  // CHECK-LABEL: test_svcmla_s8_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 180)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s8'}}
  return SVE_ACLE_FUNC(svcmla,_s8,,)(op1, op2, op3, 180);
}

svint8_t test_svcmla_s8_3(svint8_t op1, svint8_t op2, svint8_t op3)
{
  // CHECK-LABEL: test_svcmla_s8_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 270)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s8'}}
  return SVE_ACLE_FUNC(svcmla,_s8,,)(op1, op2, op3, 270);
}

svint16_t test_svcmla_s16(svint16_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svcmla_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s16'}}
  return SVE_ACLE_FUNC(svcmla,_s16,,)(op1, op2, op3, 0);
}

svint16_t test_svcmla_s16_1(svint16_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svcmla_s16_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 90)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s16'}}
  return SVE_ACLE_FUNC(svcmla,_s16,,)(op1, op2, op3, 90);
}

svint16_t test_svcmla_s16_2(svint16_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svcmla_s16_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 180)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s16'}}
  return SVE_ACLE_FUNC(svcmla,_s16,,)(op1, op2, op3, 180);
}

svint16_t test_svcmla_s16_3(svint16_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svcmla_s16_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 270)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s16'}}
  return SVE_ACLE_FUNC(svcmla,_s16,,)(op1, op2, op3, 270);
}

svint32_t test_svcmla_s32(svint32_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svcmla_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s32'}}
  return SVE_ACLE_FUNC(svcmla,_s32,,)(op1, op2, op3, 0);
}

svint32_t test_svcmla_s32_1(svint32_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svcmla_s32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 90)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s32'}}
  return SVE_ACLE_FUNC(svcmla,_s32,,)(op1, op2, op3, 90);
}

svint32_t test_svcmla_s32_2(svint32_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svcmla_s32_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 180)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s32'}}
  return SVE_ACLE_FUNC(svcmla,_s32,,)(op1, op2, op3, 180);
}

svint32_t test_svcmla_s32_3(svint32_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svcmla_s32_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 270)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s32'}}
  return SVE_ACLE_FUNC(svcmla,_s32,,)(op1, op2, op3, 270);
}

svint64_t test_svcmla_s64(svint64_t op1, svint64_t op2, svint64_t op3)
{
  // CHECK-LABEL: test_svcmla_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s64'}}
  return SVE_ACLE_FUNC(svcmla,_s64,,)(op1, op2, op3, 0);
}

svint64_t test_svcmla_s64_1(svint64_t op1, svint64_t op2, svint64_t op3)
{
  // CHECK-LABEL: test_svcmla_s64_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 90)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s64'}}
  return SVE_ACLE_FUNC(svcmla,_s64,,)(op1, op2, op3, 90);
}

svint64_t test_svcmla_s64_2(svint64_t op1, svint64_t op2, svint64_t op3)
{
  // CHECK-LABEL: test_svcmla_s64_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 180)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s64'}}
  return SVE_ACLE_FUNC(svcmla,_s64,,)(op1, op2, op3, 180);
}

svint64_t test_svcmla_s64_3(svint64_t op1, svint64_t op2, svint64_t op3)
{
  // CHECK-LABEL: test_svcmla_s64_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 270)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_s64'}}
  return SVE_ACLE_FUNC(svcmla,_s64,,)(op1, op2, op3, 270);
}

svuint8_t test_svcmla_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3)
{
  // CHECK-LABEL: test_svcmla_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 0)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u8'}}
  return SVE_ACLE_FUNC(svcmla,_u8,,)(op1, op2, op3, 0);
}

svuint8_t test_svcmla_u8_1(svuint8_t op1, svuint8_t op2, svuint8_t op3)
{
  // CHECK-LABEL: test_svcmla_u8_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 90)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u8'}}
  return SVE_ACLE_FUNC(svcmla,_u8,,)(op1, op2, op3, 90);
}

svuint8_t test_svcmla_u8_2(svuint8_t op1, svuint8_t op2, svuint8_t op3)
{
  // CHECK-LABEL: test_svcmla_u8_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 180)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u8'}}
  return SVE_ACLE_FUNC(svcmla,_u8,,)(op1, op2, op3, 180);
}

svuint8_t test_svcmla_u8_3(svuint8_t op1, svuint8_t op2, svuint8_t op3)
{
  // CHECK-LABEL: test_svcmla_u8_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cmla.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 270)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u8'}}
  return SVE_ACLE_FUNC(svcmla,_u8,,)(op1, op2, op3, 270);
}

svuint16_t test_svcmla_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svcmla_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u16'}}
  return SVE_ACLE_FUNC(svcmla,_u16,,)(op1, op2, op3, 0);
}

svuint16_t test_svcmla_u16_1(svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svcmla_u16_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 90)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u16'}}
  return SVE_ACLE_FUNC(svcmla,_u16,,)(op1, op2, op3, 90);
}

svuint16_t test_svcmla_u16_2(svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svcmla_u16_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 180)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u16'}}
  return SVE_ACLE_FUNC(svcmla,_u16,,)(op1, op2, op3, 180);
}

svuint16_t test_svcmla_u16_3(svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svcmla_u16_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 270)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u16'}}
  return SVE_ACLE_FUNC(svcmla,_u16,,)(op1, op2, op3, 270);
}

svuint32_t test_svcmla_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svcmla_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u32'}}
  return SVE_ACLE_FUNC(svcmla,_u32,,)(op1, op2, op3, 0);
}

svuint32_t test_svcmla_u32_1(svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svcmla_u32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 90)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u32'}}
  return SVE_ACLE_FUNC(svcmla,_u32,,)(op1, op2, op3, 90);
}

svuint32_t test_svcmla_u32_2(svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svcmla_u32_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 180)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u32'}}
  return SVE_ACLE_FUNC(svcmla,_u32,,)(op1, op2, op3, 180);
}

svuint32_t test_svcmla_u32_3(svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svcmla_u32_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 270)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u32'}}
  return SVE_ACLE_FUNC(svcmla,_u32,,)(op1, op2, op3, 270);
}

svuint64_t test_svcmla_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
{
  // CHECK-LABEL: test_svcmla_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u64'}}
  return SVE_ACLE_FUNC(svcmla,_u64,,)(op1, op2, op3, 0);
}

svuint64_t test_svcmla_u64_1(svuint64_t op1, svuint64_t op2, svuint64_t op3)
{
  // CHECK-LABEL: test_svcmla_u64_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 90)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u64'}}
  return SVE_ACLE_FUNC(svcmla,_u64,,)(op1, op2, op3, 90);
}

svuint64_t test_svcmla_u64_2(svuint64_t op1, svuint64_t op2, svuint64_t op3)
{
  // CHECK-LABEL: test_svcmla_u64_2
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 180)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u64'}}
  return SVE_ACLE_FUNC(svcmla,_u64,,)(op1, op2, op3, 180);
}

svuint64_t test_svcmla_u64_3(svuint64_t op1, svuint64_t op2, svuint64_t op3)
{
  // CHECK-LABEL: test_svcmla_u64_3
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cmla.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 270)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_u64'}}
  return SVE_ACLE_FUNC(svcmla,_u64,,)(op1, op2, op3, 270);
}

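// The svcmla_lane tests below pass an additional immediate lane index ahead of
// the rotation; they cover lane indices 0 and 3 for 16-bit elements and 0 and 1
// for 32-bit elements, and map to the @llvm.aarch64.sve.cmla.lane.x intrinsics.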
svint16_t test_svcmla_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.lane.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0, i32 90)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_s16'}}
  return SVE_ACLE_FUNC(svcmla_lane,_s16,,)(op1, op2, op3, 0, 90);
}

svint16_t test_svcmla_lane_s16_1(svint16_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_s16_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.lane.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 3, i32 180)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_s16'}}
  return SVE_ACLE_FUNC(svcmla_lane,_s16,,)(op1, op2, op3, 3, 180);
}

svint32_t test_svcmla_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.lane.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0, i32 270)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_s32'}}
  return SVE_ACLE_FUNC(svcmla_lane,_s32,,)(op1, op2, op3, 0, 270);
}

svint32_t test_svcmla_lane_s32_1(svint32_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_s32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.lane.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 1, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_s32'}}
  return SVE_ACLE_FUNC(svcmla_lane,_s32,,)(op1, op2, op3, 1, 0);
}
svuint16_t test_svcmla_lane_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.lane.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0, i32 90)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_u16'}}
  return SVE_ACLE_FUNC(svcmla_lane,_u16,,)(op1, op2, op3, 0, 90);
}

svuint16_t test_svcmla_lane_u16_1(svuint16_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_u16_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cmla.lane.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 3, i32 180)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_u16'}}
  return SVE_ACLE_FUNC(svcmla_lane,_u16,,)(op1, op2, op3, 3, 180);
}

svuint32_t test_svcmla_lane_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.lane.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0, i32 270)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_u32'}}
  return SVE_ACLE_FUNC(svcmla_lane,_u32,,)(op1, op2, op3, 0, 270);
}

svuint32_t test_svcmla_lane_u32_1(svuint32_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svcmla_lane_u32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cmla.lane.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 1, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // expected-warning@+1 {{implicit declaration of function 'svcmla_lane_u32'}}
  return SVE_ACLE_FUNC(svcmla_lane,_u32,,)(op1, op2, op3, 1, 0);
}