Lines Matching refs:sve

2 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
14 %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg,
25 %out = call i64 @llvm.aarch64.sve.saddv.nxv8i16(<vscale x 8 x i1> %pg,
37 %out = call i64 @llvm.aarch64.sve.saddv.nxv4i32(<vscale x 4 x i1> %pg,
48 %out = call i64 @llvm.aarch64.sve.saddv.nxv2i64(<vscale x 2 x i1> %pg,
59 %out = call i64 @llvm.aarch64.sve.uaddv.nxv16i8(<vscale x 16 x i1> %pg,
70 %out = call i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1> %pg,
82 %out = call i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1> %pg,
93 %out = call i64 @llvm.aarch64.sve.uaddv.nxv2i64(<vscale x 2 x i1> %pg,
104 %out = call i8 @llvm.aarch64.sve.smaxv.nxv16i8(<vscale x 16 x i1> %pg,
115 %out = call i16 @llvm.aarch64.sve.smaxv.nxv8i16(<vscale x 8 x i1> %pg,
126 %out = call i32 @llvm.aarch64.sve.smaxv.nxv4i32(<vscale x 4 x i1> %pg,
137 %out = call i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1> %pg,
148 %out = call i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1> %pg,
159 %out = call i16 @llvm.aarch64.sve.umaxv.nxv8i16(<vscale x 8 x i1> %pg,
170 %out = call i32 @llvm.aarch64.sve.umaxv.nxv4i32(<vscale x 4 x i1> %pg,
181 %out = call i64 @llvm.aarch64.sve.umaxv.nxv2i64(<vscale x 2 x i1> %pg,
192 %out = call i8 @llvm.aarch64.sve.sminv.nxv16i8(<vscale x 16 x i1> %pg,
203 %out = call i16 @llvm.aarch64.sve.sminv.nxv8i16(<vscale x 8 x i1> %pg,
214 %out = call i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1> %pg,
225 %out = call i64 @llvm.aarch64.sve.sminv.nxv2i64(<vscale x 2 x i1> %pg,
236 %out = call i8 @llvm.aarch64.sve.uminv.nxv16i8(<vscale x 16 x i1> %pg,
247 %out = call i16 @llvm.aarch64.sve.uminv.nxv8i16(<vscale x 8 x i1> %pg,
258 %out = call i32 @llvm.aarch64.sve.uminv.nxv4i32(<vscale x 4 x i1> %pg,
269 %out = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg,
280 %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> %pg,
291 %out = call i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1> %pg,
302 %out = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg,
313 %out = call i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1> %pg,
324 %out = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> %pg,
335 %out = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg,
346 %out = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> %pg,
357 %out = call i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1> %pg,
368 %out = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg,
379 %out = call i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1> %pg,
390 %out = call i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1> %pg,
401 %out = call i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1> %pg,
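
The matched call lines above are cut off where the second operand wraps onto the
next file line. For context, here is a minimal sketch of one complete test case,
reconstructed from the saddv match on file line 14 together with its declaration
on file line 406; the function name and the CHECK lines are assumptions based on
the SVE SADDV instruction, not text copied from the file:

; Signed add reduction across all active i8 lanes; SADDV always widens
; the result to a 64-bit scalar in a D register, hence the i64 return type.
define i64 @saddv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: saddv_i8
; CHECK: saddv d0, p0, z0.b
; CHECK: fmov x0, d0
; CHECK: ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg,
                                                  <vscale x 16 x i8> %a)
  ret i64 %out
}
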
406 declare i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
407 declare i64 @llvm.aarch64.sve.saddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
408 declare i64 @llvm.aarch64.sve.saddv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
409 declare i64 @llvm.aarch64.sve.saddv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
410 declare i64 @llvm.aarch64.sve.uaddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
411 declare i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
412 declare i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
413 declare i64 @llvm.aarch64.sve.uaddv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
414 declare i8 @llvm.aarch64.sve.smaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
415 declare i16 @llvm.aarch64.sve.smaxv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
416 declare i32 @llvm.aarch64.sve.smaxv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
417 declare i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
418 declare i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
419 declare i16 @llvm.aarch64.sve.umaxv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
420 declare i32 @llvm.aarch64.sve.umaxv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
421 declare i64 @llvm.aarch64.sve.umaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
422 declare i8 @llvm.aarch64.sve.sminv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
423 declare i16 @llvm.aarch64.sve.sminv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
424 declare i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
425 declare i64 @llvm.aarch64.sve.sminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
426 declare i8 @llvm.aarch64.sve.uminv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
427 declare i16 @llvm.aarch64.sve.uminv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
428 declare i32 @llvm.aarch64.sve.uminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
429 declare i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
430 declare i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
431 declare i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
432 declare i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
433 declare i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
434 declare i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
435 declare i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
436 declare i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
437 declare i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
438 declare i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
439 declare i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
440 declare i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
441 declare i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
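
Note the pattern in the declarations: saddv and uaddv always return i64
regardless of element type, while the max/min and bitwise reductions return a
scalar of the element width. A sketch of one narrow case, again with an assumed
function name and assumed CHECK lines (SMAXV reduces into a scalar register
matching the element size, here a B register):

; Signed max reduction over i8 lanes; the result stays 8 bits wide and is
; produced in b0 rather than being widened to 64 bits.
define i8 @smaxv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: smaxv_i8
; CHECK: smaxv b0, p0, z0.b
; CHECK: ret
  %out = call i8 @llvm.aarch64.sve.smaxv.nxv16i8(<vscale x 16 x i1> %pg,
                                                 <vscale x 16 x i8> %a)
  ret i8 %out
}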