// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t

// If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
// ASM-NOT: warning
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
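
// For example, SVE_ACLE_FUNC(svld1uh_gather, _u32base, _s32, ) expands to the
// overloaded name svld1uh_gather_s32 when SVE_OVERLOADED_FORMS is defined, and
// to the fully mangled name svld1uh_gather_u32base_s32 otherwise.

// Each test checks that an svld1uh* call lowers to a predicated 16-bit load
// (or gather) whose result is zero-extended to the 32- or 64-bit element type
// of the result vector, with the svbool_t predicate first narrowed via
// convert.from.svbool.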

svint32_t test_svld1uh_s32(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svld1uh_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svld1uh_s32(pg, base);
}

svint64_t test_svld1uh_s64(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svld1uh_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svld1uh_s64(pg, base);
}

svuint32_t test_svld1uh_u32(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svld1uh_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svld1uh_u32(pg, base);
}

svuint64_t test_svld1uh_u64(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svld1uh_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svld1uh_u64(pg, base);
}

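// The _vnum forms offset the base pointer by vnum whole vectors of halfwords,
// which lowers to a getelementptr over the unpacked <vscale x N x i16> type
// indexed by %vnum.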
svint32_t test_svld1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1uh_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svld1uh_vnum_s32(pg, base, vnum);
}

svint64_t test_svld1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1uh_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svld1uh_vnum_s64(pg, base, vnum);
}

svuint32_t test_svld1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1uh_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svld1uh_vnum_u32(pg, base, vnum);
}

svuint64_t test_svld1uh_vnum_u64(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1uh_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svld1uh_vnum_u64(pg, base, vnum);
}

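// The _u32base/_u64base gathers take a vector of base addresses plus a scalar
// byte offset (0 for the plain forms), lowering to the
// ld1.gather.scalar.offset intrinsics.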
svint32_t test_svld1uh_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
  // CHECK-LABEL: test_svld1uh_gather_u32base_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 0)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u32base, _s32, )(pg, bases);
}

svint64_t test_svld1uh_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  // CHECK-LABEL: test_svld1uh_gather_u64base_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 0)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u64base, _s64, )(pg, bases);
}

svuint32_t test_svld1uh_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  // CHECK-LABEL: test_svld1uh_gather_u32base_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 0)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u32base, _u32, )(pg, bases);
}

svuint64_t test_svld1uh_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  // CHECK-LABEL: test_svld1uh_gather_u64base_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 0)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u64base, _u64, )(pg, bases);
}

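// The vector-offset gathers take a scalar base and a vector of byte offsets.
// 32-bit offsets are widened to 64 bits, hence the distinct sxtw (signed) and
// uxtw (unsigned) gather intrinsic variants for s32 vs. u32 offsets.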
svint32_t test_svld1uh_gather_s32offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_s32offset_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s32, offset_s32, )(pg, base, offsets);
}

svint64_t test_svld1uh_gather_s64offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_s64offset_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s64, offset_s64, )(pg, base, offsets);
}

svuint32_t test_svld1uh_gather_s32offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_s32offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s32, offset_u32, )(pg, base, offsets);
}

svuint64_t test_svld1uh_gather_s64offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_s64offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s64, offset_u64, )(pg, base, offsets);
}

svint32_t test_svld1uh_gather_u32offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_u32offset_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u32, offset_s32, )(pg, base, offsets);
}

svint64_t test_svld1uh_gather_u64offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_u64offset_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u64, offset_s64, )(pg, base, offsets);
}

svuint32_t test_svld1uh_gather_u32offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_u32offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u32, offset_u32, )(pg, base, offsets);
}

svuint64_t test_svld1uh_gather_u64offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) {
  // CHECK-LABEL: test_svld1uh_gather_u64offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u64, offset_u64, )(pg, base, offsets);
}

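// The _offset forms on base vectors pass the scalar byte offset through to the
// scalar.offset gather intrinsics unscaled.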
svint32_t test_svld1uh_gather_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1uh_gather_u32base_offset_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %offset)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u32base, _offset_s32, )(pg, bases, offset);
}

svint64_t test_svld1uh_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1uh_gather_u64base_offset_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %offset)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u64base, _offset_s64, )(pg, bases, offset);
}

svuint32_t test_svld1uh_gather_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1uh_gather_u32base_offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %offset)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u32base, _offset_u32, )(pg, bases, offset);
}

svuint64_t test_svld1uh_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1uh_gather_u64base_offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %offset)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u64base, _offset_u64, )(pg, bases, offset);
}

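// The vector-index gathers are like the offset forms except that each element
// index is scaled by the element size, lowering to the .index gather
// intrinsic variants.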
svint32_t test_svld1uh_gather_s32index_s32(svbool_t pg, const uint16_t *base, svint32_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_s32index_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s32, index_s32, )(pg, base, indices);
}

svint64_t test_svld1uh_gather_s64index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_s64index_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s64, index_s64, )(pg, base, indices);
}

svuint32_t test_svld1uh_gather_s32index_u32(svbool_t pg, const uint16_t *base, svint32_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_s32index_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s32, index_u32, )(pg, base, indices);
}

svuint64_t test_svld1uh_gather_s64index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_s64index_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, s64, index_u64, )(pg, base, indices);
}

svint32_t test_svld1uh_gather_u32index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_u32index_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u32, index_s32, )(pg, base, indices);
}

svint64_t test_svld1uh_gather_u64index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_u64index_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u64, index_s64, )(pg, base, indices);
}

svuint32_t test_svld1uh_gather_u32index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_u32index_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u32, index_u32, )(pg, base, indices);
}

svuint64_t test_svld1uh_gather_u64index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) {
  // CHECK-LABEL: test_svld1uh_gather_u64index_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather_, u64, index_u64, )(pg, base, indices);
}

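// The _index forms on base vectors scale the scalar index by the halfword
// size, which appears below as a shl of %index by 1 before the gather.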
svint32_t test_svld1uh_gather_u32base_index_s32(svbool_t pg, svuint32_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1uh_gather_u32base_index_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %[[SHL]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u32base, _index_s32, )(pg, bases, index);
}

svint64_t test_svld1uh_gather_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1uh_gather_u64base_index_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %[[SHL]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u64base, _index_s64, )(pg, bases, index);
}

svuint32_t test_svld1uh_gather_u32base_index_u32(svbool_t pg, svuint32_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1uh_gather_u32base_index_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %[[SHL]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u32base, _index_u32, )(pg, bases, index);
}

svuint64_t test_svld1uh_gather_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1uh_gather_u64base_index_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %[[SHL]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return SVE_ACLE_FUNC(svld1uh_gather, _u64base, _index_u64, )(pg, bases, index);
}