;;; Matching lines (pattern "ve") from a VE vector broadcast intrinsic codegen
;;; test; surrounding define/CHECK/ret lines did not match and are omitted.
; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

  %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsl(double %0, i32 256)

declare <256 x double> @llvm.ve.vl.vbrdd.vsl(double, i32)

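; A minimal sketch (not part of the extracted test) of how the unmasked
; broadcast above sits in a complete function: vbrdd.vsl splats the scalar
; into every element, and the trailing i32 256 is the vector length. The
; wrapper name and signature are assumptions for illustration.
define fastcc <256 x double> @vbrdd_vsl_sketch(double %0) {
  %2 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsl(double %0, i32 256)
  ret <256 x double> %2
}
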
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsvl(double %0, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)

declare <256 x double> @llvm.ve.vl.vbrdd.vsvl(double, <256 x double>, i32)

declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)

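; Sketch of a complete wrapper for the _vsvl form (assumed, not from the
; test): the extra <256 x double> operand is presumably the pass-through
; value, so lanes beyond the active vector length keep what vld loaded
; before the result is stored back.
define void @vbrdd_vsvl_sketch(double %0, i8* %1) {
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsvl(double %0, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
  ret void
}
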
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsmvl(double %0, <256 x i1> %5, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)

declare <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1>, i64, i64)

declare <256 x double> @llvm.ve.vl.vbrdd.vsmvl(double, <256 x i1>, <256 x double>, i32)

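; Sketch of the masked _vsmvl form. In the fragment above, %4 comes from a
; line that did not match the extraction pattern; an fptoui of %0 is assumed
; here purely so the body type-checks. lvm.mmss writes the i64 into 64-bit
; chunk 3 of the 256-bit mask; masked-off lanes presumably keep the loaded
; values.
define void @vbrdd_vsmvl_sketch(double %0, i8* %1) {
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = fptoui double %0 to i64 ; assumed stand-in for the elided line
  %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsmvl(double %0, <256 x i1> %5, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)
  ret void
}
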
  %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 %0, i32 256)

declare <256 x double> @llvm.ve.vl.vbrdl.vsl(i64, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsvl(i64 %0, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.vbrdl.vsvl(i64, <256 x double>, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %0)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64 %0, <256 x i1> %4, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64, <256 x i1>, <256 x double>, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 31, i32 256)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsvl(i64 31, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %0)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64 31, <256 x i1> %4, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %1, i32 256)

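; The i64 31 variants exercise the immediate form: 31 presumably fits VBRD's
; small signed-immediate field, letting llc fold the constant instead of
; materializing it in a scalar register. Minimal sketch (assumed wrapper):
define fastcc <256 x double> @vbrdl_imm_sketch() {
  %1 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 31, i32 256)
  ret <256 x double> %1
}
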
  %3 = tail call fast <256 x double> @llvm.ve.vl.vbrds.vsl(float %0, i32 256)

declare <256 x double> @llvm.ve.vl.vbrds.vsl(float, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vbrds.vsvl(float %0, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.vbrds.vsvl(float, <256 x double>, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vbrds.vsmvl(float %0, <256 x i1> %5, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.vbrds.vsmvl(float, <256 x i1>, <256 x double>, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 %0, i32 256)

declare <256 x double> @llvm.ve.vl.vbrdw.vsl(i32, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsvl(i32 %0, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.vbrdw.vsvl(i32, <256 x double>, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32 %0, <256 x i1> %5, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32, <256 x i1>, <256 x double>, i32)

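; All four flavors (vbrdd/vbrdl/vbrds/vbrdw) share <256 x double> as the
; generic vector-register type; the suffix only picks the scalar
; interpretation (double, i64, float, i32). Sketch of the i32 flavor
; (assumed wrapper):
define fastcc <256 x double> @vbrdw_vsl_sketch(i32 %0) {
  %2 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 %0, i32 256)
  ret <256 x double> %2
}
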
  %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 31, i32 256)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsvl(i32 31, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32 31, <256 x i1> %5, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)

  %3 = tail call fast <256 x double> @llvm.ve.vl.pvbrd.vsl(i64 %0, i32 256)

declare <256 x double> @llvm.ve.vl.pvbrd.vsl(i64, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvbrd.vsvl(i64 %0, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)

declare <256 x double> @llvm.ve.vl.pvbrd.vsvl(i64, <256 x double>, i32)

  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> undef, i64 1, i64 %0)
  %5 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> %4, i64 6, i64 %0)
  %6 = tail call fast <256 x double> @llvm.ve.vl.pvbrd.vsMvl(i64 %0, <512 x i1> %5, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)

declare <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1>, i64, i64)

declare <256 x double> @llvm.ve.vl.pvbrd.vsMvl(i64, <512 x i1>, <256 x double>, i32)

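; pvbrd is the packed form: the i64 carries two 32-bit halves, so the mask
; is the full 512-bit <512 x i1> (a vector-mask register pair), and the two
; lvm.MMss calls each set one 64-bit chunk of it (chunk 1 in the low half,
; chunk 6 in the high half). Sketch of a complete wrapper (assumed):
define void @pvbrd_vsMvl_sketch(i64 %0, i8* %1) {
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> undef, i64 1, i64 %0)
  %5 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> %4, i64 6, i64 %0)
  %6 = tail call fast <256 x double> @llvm.ve.vl.pvbrd.vsMvl(i64 %0, <512 x i1> %5, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)
  ret void
}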