// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
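// The first two invocations above verify the generated LLVM IR for the
// non-overloaded and overloaded (-DSVE_OVERLOADED_FORMS) intrinsic names; the
// third compiles to assembly with stderr captured in %t so the ASM prefix can
// confirm that no warnings were emitted.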

// If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
// ASM-NOT: warning
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
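// For example, SVE_ACLE_FUNC(svldff1sb_gather, _u32base, _s32, ) expands to
// svldff1sb_gather_s32 when SVE_OVERLOADED_FORMS is defined (A1##A3), and to
// svldff1sb_gather_u32base_s32 otherwise (A1##A2##A3##A4).
//
// Every test below expects the same three-step lowering: the
// <vscale x 16 x i1> predicate is narrowed with convert.from.svbool, an
// llvm.aarch64.sve.ldff1* intrinsic performs a first-faulting load of i8
// elements, and the result is sign-extended to the element width of the
// destination vector.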

svint16_t test_svldff1sb_s16(svbool_t pg, const int8_t *base)
{
  // CHECK-LABEL: test_svldff1sb_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[SEXT]]
  return svldff1sb_s16(pg, base);
}

svint32_t test_svldff1sb_s32(svbool_t pg, const int8_t *base)
{
  // CHECK-LABEL: test_svldff1sb_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldff1sb_s32(pg, base);
}

svint64_t test_svldff1sb_s64(svbool_t pg, const int8_t *base)
{
  // CHECK-LABEL: test_svldff1sb_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldff1sb_s64(pg, base);
}

svuint16_t test_svldff1sb_u16(svbool_t pg, const int8_t *base)
{
  // CHECK-LABEL: test_svldff1sb_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[SEXT]]
  return svldff1sb_u16(pg, base);
}

svuint32_t test_svldff1sb_u32(svbool_t pg, const int8_t *base)
{
  // CHECK-LABEL: test_svldff1sb_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldff1sb_u32(pg, base);
}

svuint64_t test_svldff1sb_u64(svbool_t pg, const int8_t *base)
{
  // CHECK-LABEL: test_svldff1sb_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldff1sb_u64(pg, base);
}

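// The _vnum forms address base + vnum whole vectors: the pointer is bitcast
// to a scalable-vector pointer and indexed by a getelementptr whose first
// index is %vnum, so the byte offset scales with the hardware vector length.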
svint16_t test_svldff1sb_vnum_s16(svbool_t pg, const int8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldff1sb_vnum_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 8 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[SEXT]]
  return svldff1sb_vnum_s16(pg, base, vnum);
}

svint32_t test_svldff1sb_vnum_s32(svbool_t pg, const int8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldff1sb_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 4 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldff1sb_vnum_s32(pg, base, vnum);
}

svint64_t test_svldff1sb_vnum_s64(svbool_t pg, const int8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldff1sb_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 2 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldff1sb_vnum_s64(pg, base, vnum);
}

svuint16_t test_svldff1sb_vnum_u16(svbool_t pg, const int8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldff1sb_vnum_u16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 8 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[SEXT]]
  return svldff1sb_vnum_u16(pg, base, vnum);
}

svuint32_t test_svldff1sb_vnum_u32(svbool_t pg, const int8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldff1sb_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 4 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldff1sb_vnum_u32(pg, base, vnum);
}

svuint64_t test_svldff1sb_vnum_u64(svbool_t pg, const int8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldff1sb_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 2 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldff1sb_vnum_u64(pg, base, vnum);
}

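// Gather forms taking a vector of base addresses (_u32base/_u64base) lower to
// the ldff1.gather.scalar.offset intrinsic with an immediate offset of 0.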
svint32_t test_svldff1sb_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
  // CHECK-LABEL: test_svldff1sb_gather_u32base_s32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 0)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u32base, _s32, )(pg, bases);
}

svint64_t test_svldff1sb_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  // CHECK-LABEL: test_svldff1sb_gather_u64base_s64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 0)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u64base, _s64, )(pg, bases);
}

svuint32_t test_svldff1sb_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  // CHECK-LABEL: test_svldff1sb_gather_u32base_u32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 0)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u32base, _u32, )(pg, bases);
}

svuint64_t test_svldff1sb_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  // CHECK-LABEL: test_svldff1sb_gather_u64base_u64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 0)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u64base, _u64, )(pg, bases);
}

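// Gather forms taking a scalar base and a vector of offsets pick the intrinsic
// from the offset type: 32-bit signed offsets use the .sxtw (sign-extending)
// variant, 32-bit unsigned offsets the .uxtw (zero-extending) variant, and
// 64-bit offsets the plain gather.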
svint32_t test_svldff1sb_gather_s32offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_s32offset_s32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, s32, offset_s32, )(pg, base, offsets);
}

svint64_t test_svldff1sb_gather_s64offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_s64offset_s64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, s64, offset_s64, )(pg, base, offsets);
}

svuint32_t test_svldff1sb_gather_s32offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_s32offset_u32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, s32, offset_u32, )(pg, base, offsets);
}

svuint64_t test_svldff1sb_gather_s64offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_s64offset_u64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, s64, offset_u64, )(pg, base, offsets);
}

svint32_t test_svldff1sb_gather_u32offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_u32offset_s32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, u32, offset_s32, )(pg, base, offsets);
}

svint64_t test_svldff1sb_gather_u64offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_u64offset_s64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, u64, offset_s64, )(pg, base, offsets);
}

svuint32_t test_svldff1sb_gather_u32offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_u32offset_u32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, u32, offset_u32, )(pg, base, offsets);
}

svuint64_t test_svldff1sb_gather_u64offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) {
  // CHECK-LABEL: test_svldff1sb_gather_u64offset_u64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather_, u64, offset_u64, )(pg, base, offsets);
}

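// Gather forms combining a vector of bases with a scalar offset pass the
// offset through unchanged as the i64 operand of ldff1.gather.scalar.offset.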
svint32_t test_svldff1sb_gather_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset) {
  // CHECK-LABEL: test_svldff1sb_gather_u32base_offset_s32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 %offset)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u32base, _offset_s32, )(pg, bases, offset);
}

svint64_t test_svldff1sb_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
  // CHECK-LABEL: test_svldff1sb_gather_u64base_offset_s64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 %offset)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u64base, _offset_s64, )(pg, bases, offset);
}

svuint32_t test_svldff1sb_gather_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset) {
  // CHECK-LABEL: test_svldff1sb_gather_u32base_offset_u32
  // CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 %offset)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u32base, _offset_u32, )(pg, bases, offset);
}

svuint64_t test_svldff1sb_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  // CHECK-LABEL: test_svldff1sb_gather_u64base_offset_u64
  // CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 %offset)
  // CHECK: [[SEXT:%.*]] = sext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> [[SEXT]]
  return SVE_ACLE_FUNC(svldff1sb_gather, _u64base, _offset_u64, )(pg, bases, offset);
}