Lines matching refs:ve (excerpts from an LLVM IR test exercising the VE vmv intrinsics):
; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %4, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %0, i32 256)

declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
declare <256 x double> @llvm.ve.vl.vmv.vsvl(i32, <256 x double>, i32)
declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
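
These fragments come from a single test function: a vector is loaded with vld.vssl, passed through vmv.vsvl, and stored back with vst.vssl. Assembled into a complete function they look roughly like the sketch below; the function and value names are illustrative (the listing does not show the original define line), and the sketch relies on the declarations above.

; Minimal sketch, assuming an illustrative wrapper function; pair it with the
; declarations listed above.
define void @vmv_vsvl_sketch(i8* %buf) {
  ; load 256 doubles from %buf with stride 8 and vector length 256
  %v = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %buf, i32 256)
  ; vmv.vsvl: scalar operand 31, source vector %v, vector length 256
  %r = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %v, i32 256)
  ; store the result back to the same buffer
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %r, i64 8, i8* %buf, i32 256)
  ret void
}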

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %3, <256 x double>…
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)

declare <256 x double> @llvm.ve.vl.vmv.vsvvl(i32, <256 x double>, <256 x double>, i32)
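
The vsvvl call above is cut off in the listing. Its declaration gives the full signature (i32, <256 x double>, <256 x double>, i32), so the call also carries a pass-through vector and a trailing vector length. A hedged completion follows; the pass-through value and the vector length of 128 are assumptions, not taken from the original test.

; Assumed completion of the truncated vsvvl call; function and value names,
; the pass-through operand, and the vector length are illustrative.
define void @vmv_vsvvl_sketch(i8* %buf) {
  %v = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %buf, i32 256)
  ; vmv.vsvvl operands: (scalar, source vector, pass-through vector, vector length)
  %r = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %v, <256 x double> %v, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %r, i64 8, i8* %buf, i32 256)
  ret void
}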

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 31, <256 x double> %3, <256 x i1> un…
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)

declare <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
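
The vsvmvl call is likewise truncated. Per its declaration it takes a scalar, the source vector, a <256 x i1> mask (the excerpt shows it starting with "un", presumably undef), a pass-through vector, and a vector length. An assumed completion, with the mask, pass-through, and vector length values chosen for illustration:

; Assumed completion of the truncated vsvmvl call; function and value names,
; the undef mask, the pass-through operand, and the vector length are illustrative.
define void @vmv_vsvmvl_sketch(i8* %buf) {
  %v = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %buf, i32 256)
  ; vmv.vsvmvl operands: (scalar, source vector, mask, pass-through vector, vector length)
  %r = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 31, <256 x double> %v, <256 x i1> undef, <256 x double> %v, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %r, i64 8, i8* %buf, i32 256)
  ret void
}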