Lines matching refs:vld (each match is prefixed with its line number in the searched test file):
14 ; CHECK-NEXT: vld %v0, %s1, %s0
19 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
25 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
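
For context, the call at line 19 sits inside a test function along these lines (a minimal sketch: the fastcc signature, the function name, the lea/lvl and b.l.t CHECK lines, and the ret are assumed from the usual VELIntrinsics test layout and are not part of the matches above):

define fastcc <256 x double> @vld_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vld_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
  ret <256 x double> %3
}

The vssl suffix spells out the operand order: stride (%1), base address (%0), then the vector length 256, which the backend materializes into VL via lea/lvl before issuing vld.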
33 ; CHECK-NEXT: vld %v0, %s1, %s2
34 ; CHECK-NEXT: vld %v0, %s1, %s0
39 %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
40   %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
46 declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)
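
The vssvl variant takes one extra <256 x double> operand, a pass-through value (here the result of a plain vssl load; elements past the active vector length presumably keep its contents). A sketch of the enclosing test under the same assumptions as above:

define fastcc <256 x double> @vld_vssvl(i8* %0, i64 %1, i8* %2) {
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  ret <256 x double> %5
}

Because the pass-through ties both results to the same vector register, lines 33-34 check two back-to-back vld instructions writing %v0.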
54 ; CHECK-NEXT: vld %v0, 8, %s0
59 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
70 ; CHECK-NEXT: vld %v0, 8, %s1
71 ; CHECK-NEXT: vld %v0, 8, %s0
76 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
77   %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
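
Lines 54-77 repeat both tests with a compile-time stride of 8: instruction selection folds the constant into the vld immediate operand, which is why these CHECK lines read vld %v0, 8, %s0 rather than naming a stride register. The vssl immediate case, with the same assumed framing:

define fastcc <256 x double> @vld_vssl_imm(i8* %0) {
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  ret <256 x double> %2
}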
88 ; CHECK-NEXT: vld.nc %v0, %s1, %s0
107 ; CHECK-NEXT: vld.nc %v0, %s1, %s2
108 ; CHECK-NEXT: vld.nc %v0, %s1, %s0
128 ; CHECK-NEXT: vld.nc %v0, 8, %s0
144 ; CHECK-NEXT: vld.nc %v0, 8, %s1
145 ; CHECK-NEXT: vld.nc %v0, 8, %s0
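
The vld.nc matches (lines 88-145) are the non-cacheable counterparts of the same four tests. Only their CHECK lines appear in this listing, presumably because the intrinsic symbol is vldnc, a distinct identifier that a refs:vld query does not match. Assuming the usual VE intrinsic naming, the corresponding declarations are:

declare <256 x double> @llvm.ve.vl.vldnc.vssl(i64, i8*, i32)
declare <256 x double> @llvm.ve.vl.vldnc.vssvl(i64, i8*, <256 x double>, i32)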