Lines matching refs:sve — every hit is either a call to or a declaration of one of the AArch64 SVE while-comparison intrinsics (whilege, whilehs, whilegt, whilehi); the leading number on each line is its position in the source file.
15 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %a, i32 %b)
23 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %a, i64 %b)
31 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %a, i32 %b)
39 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %a, i64 %b)
47 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %a, i32 %b)
55 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %a, i64 %b)
63 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b)
71 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %a, i64 %b)
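
Each match above is only the call line; in the source file every call sits inside its own small test function. A minimal, self-contained sketch of how the first whilege variant would be wrapped is shown below (the function name and the instruction named in the comments are illustrative assumptions, not part of the matches). whilege produces a predicate from a signed greater-than-or-equal while-comparison; the nxv16i1 suffix selects byte-granule predicate lanes and the trailing i32 selects 32-bit scalar operands.

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32, i32)

; Returns a predicate whose lanes reflect a signed >= while-comparison of %a against %b.
; Expected to lower to a single "whilege p0.b, w0, w1" when targeting SVE2.
define <vscale x 16 x i1> @whilege_b_ww(i32 %a, i32 %b) {
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}
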
83 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %a, i32 %b)
91 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %a, i64 %b)
99 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %a, i32 %b)
107 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %a, i64 %b)
115 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %a, i32 %b)
123 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %a, i64 %b)
131 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b)
139 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %a, i64 %b)
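
whilehs is the unsigned counterpart of whilege ("higher or same"). A sketch for the halfword-element, 64-bit-operand variant, with an assumed function name:

declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64, i64)

; Unsigned >= while-comparison; nxv8i1 gives halfword-granule predicate lanes and
; the i64 operands arrive in x registers (expected lowering: "whilehs p0.h, x0, x1").
define <vscale x 8 x i1> @whilehs_h_xx(i64 %a, i64 %b) {
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}
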
151 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %a, i32 %b)
159 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %a, i64 %b)
167 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %a, i32 %b)
175 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %a, i64 %b)
183 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %a, i32 %b)
191 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %a, i64 %b)
199 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b)
207 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %a, i64 %b)
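
whilegt drops the equality case: a signed greater-than while-comparison. A sketch for the word-element, 32-bit-operand variant (assumed function name):

declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32, i32)

; Signed > while-comparison; nxv4i1 gives word-granule predicate lanes
; (expected lowering: "whilegt p0.s, w0, w1").
define <vscale x 4 x i1> @whilegt_s_ww(i32 %a, i32 %b) {
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}
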
219 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %a, i32 %b)
227 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %a, i64 %b)
235 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %a, i32 %b)
243 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %a, i64 %b)
251 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %a, i32 %b)
259 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %a, i64 %b)
267 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b)
275 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %a, i64 %b)
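
whilehi is the unsigned strict comparison ("higher"). A sketch for the doubleword-element, 64-bit-operand variant (assumed function name):

declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64, i64)

; Unsigned > while-comparison; nxv2i1 gives doubleword-granule predicate lanes
; (expected lowering: "whilehi p0.d, x0, x1").
define <vscale x 2 x i1> @whilehi_d_xx(i64 %a, i64 %b) {
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}
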
279 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32, i32)
280 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64, i64)
281 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32, i32)
282 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64, i64)
283 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32, i32)
284 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64, i64)
285 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32)
286 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64, i64)
288 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32, i32)
289 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64, i64)
290 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32, i32)
291 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64, i64)
292 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32, i32)
293 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64, i64)
294 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32)
295 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64, i64)
297 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32, i32)
298 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64, i64)
299 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32, i32)
300 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64, i64)
301 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32, i32)
302 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64, i64)
303 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32)
304 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64, i64)
306 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32, i32)
307 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64, i64)
308 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32, i32)
309 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64, i64)
310 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32, i32)
311 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64, i64)
312 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32)
313 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64, i64)
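
Assuming these matches come from a single .ll codegen test, the usual way to exercise every variant at once is an llc RUN line checked with FileCheck; the triple and feature string below are assumptions, not taken from the search output:

; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s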