/external/llvm-project/llvm/include/llvm/IR/
D | IntrinsicsVEVL.gen.td |
  1 let TargetPrefix = "ve" in def int_ve_vl_vld_vssl : GCCBuiltin<"__builtin_ve_vl_vld_vssl">, Intrins…
  2 let TargetPrefix = "ve" in def int_ve_vl_vld_vssvl : GCCBuiltin<"__builtin_ve_vl_vld_vssvl">, Intri…
  3 let TargetPrefix = "ve" in def int_ve_vl_vldnc_vssl : GCCBuiltin<"__builtin_ve_vl_vldnc_vssl">, Int…
  4 let TargetPrefix = "ve" in def int_ve_vl_vldnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldnc_vssvl">, I…
  5 let TargetPrefix = "ve" in def int_ve_vl_vldu_vssl : GCCBuiltin<"__builtin_ve_vl_vldu_vssl">, Intri…
  6 let TargetPrefix = "ve" in def int_ve_vl_vldu_vssvl : GCCBuiltin<"__builtin_ve_vl_vldu_vssvl">, Int…
  7 let TargetPrefix = "ve" in def int_ve_vl_vldunc_vssl : GCCBuiltin<"__builtin_ve_vl_vldunc_vssl">, I…
  8 let TargetPrefix = "ve" in def int_ve_vl_vldunc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldunc_vssvl">,…
  9 let TargetPrefix = "ve" in def int_ve_vl_vldlsx_vssl : GCCBuiltin<"__builtin_ve_vl_vldlsx_vssl">, I…
  10 let TargetPrefix = "ve" in def int_ve_vl_vldlsx_vssvl : GCCBuiltin<"__builtin_ve_vl_vldlsx_vssvl">,…
  [all …]

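Each record above maps a __builtin_ve_vl_* GCC builtin to an llvm.ve.vl.* intrinsic; the Intrinsic<…> payload is cut off at the column limit, but the resulting IR signatures are visible in the tests further down. For the first two records, the corresponding declarations (copied from vld.ll below; the operand meanings are an inference from how the tests call them) are:

    ; IR-level declarations matching int_ve_vl_vld_vssl and int_ve_vl_vld_vssvl;
    ; operands appear to be stride, base pointer, (passthrough vector,) and
    ; vector length.
    declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)
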
/external/mesa3d/src/gallium/auxiliary/util/
D | u_vbuf.c |
  103 struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS]; member
  170 struct u_vbuf_elements *ve, *ve_saved; member
  346 struct u_vbuf_elements *ve; in u_vbuf_set_vertex_elements_internal() local
  364 ve = cso->data; in u_vbuf_set_vertex_elements_internal()
  366 ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data; in u_vbuf_set_vertex_elements_internal()
  369 assert(ve); in u_vbuf_set_vertex_elements_internal()
  371 if (ve != mgr->ve) in u_vbuf_set_vertex_elements_internal()
  372 pipe->bind_vertex_elements_state(pipe, ve->driver_cso); in u_vbuf_set_vertex_elements_internal()
  374 return ve; in u_vbuf_set_vertex_elements_internal()
  380 mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, velems); in u_vbuf_set_vertex_elements()
  [all …]

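A condensed, hypothetical paraphrase of the lookup-and-bind logic visible in the matches above (function and parameter names are mine, not the verbatim Mesa source): the manager resolves a cached u_vbuf_elements state object and only rebinds the driver CSO when the resolved state actually changed.

    /* Sketch of the pattern in u_vbuf_set_vertex_elements_internal():
     * compare the resolved element state against the currently bound
     * one and skip redundant driver rebinds. */
    static struct u_vbuf_elements *
    set_vertex_elements_sketch(struct u_vbuf *mgr, struct pipe_context *pipe,
                               struct u_vbuf_elements *ve)
    {
       assert(ve);
       if (ve != mgr->ve)
          pipe->bind_vertex_elements_state(pipe, ve->driver_cso);
       return ve;
    }
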
/external/llvm-project/llvm/test/CodeGen/VE/VELIntrinsics/
D | vbrd.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  19 %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsl(double %0, i32 256)
  25 declare <256 x double> @llvm.ve.vl.vbrdd.vsl(double, i32)
  37 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  38 %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsvl(double %0, <256 x double> %3, i32 256)
  39 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
  44 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
  47 declare <256 x double> @llvm.ve.vl.vbrdd.vsvl(double, <256 x double>, i32)
  50 declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
  70 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  [all …]

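A minimal standalone sketch (not taken from the test file; the function name is mine) combining the declarations shown above: broadcast a scalar across all 256 lanes, then store the result with stride 8.

    ; Hypothetical example: vbrdd broadcasts %s into a vector register,
    ; vst stores it back with an 8-byte stride; the trailing i32 256 is
    ; the vector length.
    define void @broadcast_example(double %s, i8* %buf) {
      %v = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsl(double %s, i32 256)
      tail call void @llvm.ve.vl.vst.vssl(<256 x double> %v, i64 8, i8* %buf, i32 256)
      ret void
    }
    declare <256 x double> @llvm.ve.vl.vbrdd.vsl(double, i32)
    declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
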
D | vst.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  17 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
  18 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
  23 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
  26 declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
  37 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
  38 tail call void @llvm.ve.vl.vst.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
  43 declare void @llvm.ve.vl.vst.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
  54 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  55 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
  [all …]

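A hedged sketch of the masked-store variant exercised above (function and parameter names are mine): load 256 elements, then store back only the lanes selected by a mask instead of the `undef` mask the test uses.

    ; Hypothetical example of vst.vssml: the <256 x i1> operand selects
    ; which lanes are written.
    define void @masked_store_example(i8* %p, i64 %stride, <256 x i1> %mask) {
      %v = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %stride, i8* %p, i32 256)
      tail call void @llvm.ve.vl.vst.vssml(<256 x double> %v, i64 %stride, i8* %p, <256 x i1> %mask, i32 256)
      ret void
    }
    declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    declare void @llvm.ve.vl.vst.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
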
D | vfmk.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  17 %1 = tail call <256 x i1> @llvm.ve.vl.vfmklat.ml(i32 256)
  22 declare <256 x i1> @llvm.ve.vl.vfmklat.ml(i32)
  32 %1 = tail call <256 x i1> @llvm.ve.vl.vfmklaf.ml(i32 256)
  37 declare <256 x i1> @llvm.ve.vl.vfmklaf.ml(i32)
  47 %2 = tail call <256 x i1> @llvm.ve.vl.vfmklgt.mvl(<256 x double> %0, i32 256)
  52 declare <256 x i1> @llvm.ve.vl.vfmklgt.mvl(<256 x double>, i32)
  62 %3 = tail call <256 x i1> @llvm.ve.vl.vfmklgt.mvml(<256 x double> %0, <256 x i1> %1, i32 256)
  67 declare <256 x i1> @llvm.ve.vl.vfmklgt.mvml(<256 x double>, <256 x i1>, i32)
  77 %2 = tail call <256 x i1> @llvm.ve.vl.vfmkllt.mvl(<256 x double> %0, i32 256)
  [all …]

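A minimal sketch using the mask-generation intrinsics declared above (names are mine; the exact comparison semantics of the gt/lt suffixes are not spelled out in the snippet, so treat the comment as an assumption): derive a lane mask from a vector.

    ; Hypothetical example: vfmklgt appears to produce a per-lane mask
    ; from a greater-than condition on each element of %v.
    define <256 x i1> @mask_example(<256 x double> %v) {
      %gt = tail call <256 x i1> @llvm.ve.vl.vfmklgt.mvl(<256 x double> %v, i32 256)
      ret <256 x i1> %gt
    }
    declare <256 x i1> @llvm.ve.vl.vfmklgt.mvl(<256 x double>, i32)
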
D | lsv.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  19 %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  20 %5 = tail call fast <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double> %4, i32 %2, i64 %1)
  21 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %0, i32 256)
  26 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
  29 declare <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double>, i32, i64)
  32 declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
  44 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  45 %4 = tail call i64 @llvm.ve.vl.lvsl.svs(<256 x double> %3, i32 %1)
  50 declare i64 @llvm.ve.vl.lvsl.svs(<256 x double>, i32)
  [all …]

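A hedged round-trip sketch pairing the two element-access intrinsics above (names are mine; reading the i32 as an element index and the i64 as the inserted value is an inference from the calls in the test, not stated in the snippet):

    ; Hypothetical example: lsv appears to insert scalar bits into one
    ; vector element, lvsl to read one element back as an i64.
    define i64 @element_roundtrip(<256 x double> %v, i32 %idx, i64 %bits) {
      %w = tail call fast <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double> %v, i32 %idx, i64 %bits)
      %r = tail call i64 @llvm.ve.vl.lvsl.svs(<256 x double> %w, i32 %idx)
      ret i64 %r
    }
    declare <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double>, i32, i64)
    declare i64 @llvm.ve.vl.lvsl.svs(<256 x double>, i32)
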
D | vmv.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  19 %5 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %4, i32 256)
  20 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %0, i32 256)
  25 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
  28 declare <256 x double> @llvm.ve.vl.vmv.vsvl(i32, <256 x double>, i32)
  31 declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
  46 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  47 …%4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %3, <256 x double>…
  48 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)
  [all …]

D | vcvt.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  17 %2 = tail call fast <256 x double> @llvm.ve.vl.vcvtwdsx.vvl(<256 x double> %0, i32 256)
  22 declare <256 x double> @llvm.ve.vl.vcvtwdsx.vvl(<256 x double>, i32)
  35 …%3 = tail call fast <256 x double> @llvm.ve.vl.vcvtwdsx.vvvl(<256 x double> %0, <256 x double> %1,…
  40 declare <256 x double> @llvm.ve.vl.vcvtwdsx.vvvl(<256 x double>, <256 x double>, i32)
  53 …%4 = tail call fast <256 x double> @llvm.ve.vl.vcvtwdsx.vvmvl(<256 x double> %0, <256 x i1> %1, <2…
  58 declare <256 x double> @llvm.ve.vl.vcvtwdsx.vvmvl(<256 x double>, <256 x i1>, <256 x double>, i32)
  68 %2 = tail call fast <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvl(<256 x double> %0, i32 256)
  73 declare <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvl(<256 x double>, i32)
  86 …%3 = tail call fast <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvvl(<256 x double> %0, <256 x double> %…
  [all …]

D | lvlgen.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  6 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
  7 declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
  33 %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 256)
  34 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l0, i64 16, i8* %Q, i32 %evl)
  35 %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, i8* %P, i32 128)
  36 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l1, i64 16, i8* %Q, i32 %evl2)
  37 %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 128)
  38 tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l2, i64 16, i8* %Q, i32 %evl)
  58 %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 %evl)
  [all …]

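The point of this test, judging from its name and body, is vector-length handling: consecutive intrinsic calls carry different VL operands (256, 128, %evl), which the backend has to turn into updates of the VL register. A minimal sketch of such a sequence (function name is mine, mirroring lines 33-34 above):

    ; Hypothetical example: two calls with different vector-length
    ; operands force the backend to re-materialize the VL register
    ; between them.
    define void @vl_switch_example(i8* %p, i8* %q, i32 %evl) {
      %v = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %p, i32 256)
      tail call void @llvm.ve.vl.vst.vssl(<256 x double> %v, i64 16, i8* %q, i32 %evl)
      ret void
    }
    declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
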
D | vseq.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  16 %1 = tail call fast <256 x double> @llvm.ve.vl.vseq.vl(i32 256)
  21 declare <256 x double> @llvm.ve.vl.vseq.vl(i32)
  31 %2 = tail call fast <256 x double> @llvm.ve.vl.vseq.vvl(<256 x double> %0, i32 256)
  36 declare <256 x double> @llvm.ve.vl.vseq.vvl(<256 x double>, i32)
  46 %1 = tail call fast <256 x double> @llvm.ve.vl.pvseqlo.vl(i32 256)
  51 declare <256 x double> @llvm.ve.vl.pvseqlo.vl(i32)
  61 %2 = tail call fast <256 x double> @llvm.ve.vl.pvseqlo.vvl(<256 x double> %0, i32 256)
  66 declare <256 x double> @llvm.ve.vl.pvseqlo.vvl(<256 x double>, i32)
  76 %1 = tail call fast <256 x double> @llvm.ve.vl.pvsequp.vl(i32 256)
  [all …]

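A one-liner sketch of the simplest form above (function name is mine; describing the result as an index sequence is an assumption based on the instruction's name, not stated in the snippet):

    ; Hypothetical example: vseq appears to materialize a running lane
    ; sequence directly in a vector register, with no memory operand.
    define <256 x double> @iota_example() {
      %seq = tail call fast <256 x double> @llvm.ve.vl.vseq.vl(i32 256)
      ret <256 x double> %seq
    }
    declare <256 x double> @llvm.ve.vl.vseq.vl(i32)
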
D | lvm.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  15 %3 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %1)
  16 %4 = tail call i64 @llvm.ve.vl.svm.sms(<256 x i1> %3, i64 3)
  21 declare <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1>, i64, i64)
  24 declare i64 @llvm.ve.vl.svm.sms(<256 x i1>, i64)
  35 %3 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> undef, i64 5, i64 %1)
  36 %4 = tail call i64 @llvm.ve.vl.svm.sMs(<512 x i1> %3, i64 3)
  37 %5 = tail call i64 @llvm.ve.vl.svm.sMs(<512 x i1> %3, i64 6)
  43 declare <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1>, i64, i64)
  46 declare i64 @llvm.ve.vl.svm.sMs(<512 x i1>, i64)

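A self-contained round trip mirroring lines 15-16 above (function name is mine): write an i64 into slot 3 of a mask register, then read the same slot back.

    ; Hypothetical example: lvm.mmss writes %bits into mask-register
    ; slot 3; svm.sms reads slot 3 back out.
    define i64 @mask_slot_roundtrip(i64 %bits) {
      %m = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %bits)
      %r = tail call i64 @llvm.ve.vl.svm.sms(<256 x i1> %m, i64 3)
      ret i64 %r
    }
    declare <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1>, i64, i64)
    declare i64 @llvm.ve.vl.svm.sms(<256 x i1>, i64)
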
D | vld.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  19 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
  25 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
  39 %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
  40 …%5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 25…
  46 declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)
  59 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  76 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  77 …%4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  93 %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %0, i32 256)
  [all …]

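A hedged sketch of the _vssvl passthrough form shown above (names are mine; the reading of the extra vector operand as a passthrough for lanes beyond the active vector length is an assumption consistent with how the tests chain the calls):

    ; Hypothetical example: the second load runs at VL 128 and takes the
    ; first load's result as the value for the remaining lanes.
    define <256 x double> @load_with_passthrough(i8* %p, i8* %q) {
      %old = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %q, i32 256)
      %new = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, i8* %p, <256 x double> %old, i32 128)
      ret <256 x double> %new
    }
    declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
    declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)
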
D | vrsqrt.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  17 %2 = tail call fast <256 x double> @llvm.ve.vl.vrsqrtd.vvl(<256 x double> %0, i32 256)
  22 declare <256 x double> @llvm.ve.vl.vrsqrtd.vvl(<256 x double>, i32)
  35 …%3 = tail call fast <256 x double> @llvm.ve.vl.vrsqrtd.vvvl(<256 x double> %0, <256 x double> %1, …
  40 declare <256 x double> @llvm.ve.vl.vrsqrtd.vvvl(<256 x double>, <256 x double>, i32)
  50 %2 = tail call fast <256 x double> @llvm.ve.vl.vrsqrts.vvl(<256 x double> %0, i32 256)
  55 declare <256 x double> @llvm.ve.vl.vrsqrts.vvl(<256 x double>, i32)
  68 …%3 = tail call fast <256 x double> @llvm.ve.vl.vrsqrts.vvvl(<256 x double> %0, <256 x double> %1, …
  73 declare <256 x double> @llvm.ve.vl.vrsqrts.vvvl(<256 x double>, <256 x double>, i32)
  83 %2 = tail call fast <256 x double> @llvm.ve.vl.vrsqrtdnex.vvl(<256 x double> %0, i32 256)
  [all …]

D | vfcmp.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vfcmpd.vvvl(<256 x double> %0, <256 x double> %1, i…
  23 declare <256 x double> @llvm.ve.vl.vfcmpd.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfcmpd.vvvvl(<256 x double> %0, <256 x double> %1, …
  41 declare <256 x double> @llvm.ve.vl.vfcmpd.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
  51 %3 = tail call fast <256 x double> @llvm.ve.vl.vfcmpd.vsvl(double %0, <256 x double> %1, i32 256)
  56 declare <256 x double> @llvm.ve.vl.vfcmpd.vsvl(double, <256 x double>, i32)
  69 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfcmpd.vsvvl(double %0, <256 x double> %1, <256 x d…
  74 declare <256 x double> @llvm.ve.vl.vfcmpd.vsvvl(double, <256 x double>, <256 x double>, i32)
  87 …%5 = tail call fast <256 x double> @llvm.ve.vl.vfcmpd.vvvmvl(<256 x double> %0, <256 x double> %1,…
  [all …]

D | vfadd.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vfaddd.vvvl(<256 x double> %0, <256 x double> %1, i…
  23 declare <256 x double> @llvm.ve.vl.vfaddd.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfaddd.vvvvl(<256 x double> %0, <256 x double> %1, …
  41 declare <256 x double> @llvm.ve.vl.vfaddd.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
  51 %3 = tail call fast <256 x double> @llvm.ve.vl.vfaddd.vsvl(double %0, <256 x double> %1, i32 256)
  56 declare <256 x double> @llvm.ve.vl.vfaddd.vsvl(double, <256 x double>, i32)
  69 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfaddd.vsvvl(double %0, <256 x double> %1, <256 x d…
  74 declare <256 x double> @llvm.ve.vl.vfaddd.vsvvl(double, <256 x double>, <256 x double>, i32)
  87 …%5 = tail call fast <256 x double> @llvm.ve.vl.vfaddd.vvvmvl(<256 x double> %0, <256 x double> %1,…
  [all …]

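The same vvvl / vsvl / vvvmvl operand shapes recur in vfcmp.ll above and in vfmul.ll, vfsub.ll, vfmax.ll and vfmin.ll below, so one sketch covers the family. A hedged example combining the two unmasked forms declared above (function name is mine):

    ; Hypothetical example: t = a + b elementwise (vvvl), then
    ; r = s + t with the scalar s applied to every lane (vsvl).
    define <256 x double> @fadd_example(double %s, <256 x double> %a, <256 x double> %b) {
      %t = tail call fast <256 x double> @llvm.ve.vl.vfaddd.vvvl(<256 x double> %a, <256 x double> %b, i32 256)
      %r = tail call fast <256 x double> @llvm.ve.vl.vfaddd.vsvl(double %s, <256 x double> %t, i32 256)
      ret <256 x double> %r
    }
    declare <256 x double> @llvm.ve.vl.vfaddd.vvvl(<256 x double>, <256 x double>, i32)
    declare <256 x double> @llvm.ve.vl.vfaddd.vsvl(double, <256 x double>, i32)
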
D | vfmul.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vfmuld.vvvl(<256 x double> %0, <256 x double> %1, i…
  23 declare <256 x double> @llvm.ve.vl.vfmuld.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfmuld.vvvvl(<256 x double> %0, <256 x double> %1, …
  41 declare <256 x double> @llvm.ve.vl.vfmuld.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
  51 %3 = tail call fast <256 x double> @llvm.ve.vl.vfmuld.vsvl(double %0, <256 x double> %1, i32 256)
  56 declare <256 x double> @llvm.ve.vl.vfmuld.vsvl(double, <256 x double>, i32)
  69 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfmuld.vsvvl(double %0, <256 x double> %1, <256 x d…
  74 declare <256 x double> @llvm.ve.vl.vfmuld.vsvvl(double, <256 x double>, <256 x double>, i32)
  87 …%5 = tail call fast <256 x double> @llvm.ve.vl.vfmuld.vvvmvl(<256 x double> %0, <256 x double> %1,…
  [all …]

D | vfsub.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vfsubd.vvvl(<256 x double> %0, <256 x double> %1, i…
  23 declare <256 x double> @llvm.ve.vl.vfsubd.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfsubd.vvvvl(<256 x double> %0, <256 x double> %1, …
  41 declare <256 x double> @llvm.ve.vl.vfsubd.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
  51 %3 = tail call fast <256 x double> @llvm.ve.vl.vfsubd.vsvl(double %0, <256 x double> %1, i32 256)
  56 declare <256 x double> @llvm.ve.vl.vfsubd.vsvl(double, <256 x double>, i32)
  69 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfsubd.vsvvl(double %0, <256 x double> %1, <256 x d…
  74 declare <256 x double> @llvm.ve.vl.vfsubd.vsvvl(double, <256 x double>, <256 x double>, i32)
  87 …%5 = tail call fast <256 x double> @llvm.ve.vl.vfsubd.vvvmvl(<256 x double> %0, <256 x double> %1,…
  [all …]

D | vfmax.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vfmaxd.vvvl(<256 x double> %0, <256 x double> %1, i…
  23 declare <256 x double> @llvm.ve.vl.vfmaxd.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfmaxd.vvvvl(<256 x double> %0, <256 x double> %1, …
  41 declare <256 x double> @llvm.ve.vl.vfmaxd.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
  51 %3 = tail call fast <256 x double> @llvm.ve.vl.vfmaxd.vsvl(double %0, <256 x double> %1, i32 256)
  56 declare <256 x double> @llvm.ve.vl.vfmaxd.vsvl(double, <256 x double>, i32)
  69 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfmaxd.vsvvl(double %0, <256 x double> %1, <256 x d…
  74 declare <256 x double> @llvm.ve.vl.vfmaxd.vsvvl(double, <256 x double>, <256 x double>, i32)
  87 …%5 = tail call fast <256 x double> @llvm.ve.vl.vfmaxd.vvvmvl(<256 x double> %0, <256 x double> %1,…
  [all …]

D | vfmin.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vfmind.vvvl(<256 x double> %0, <256 x double> %1, i…
  23 declare <256 x double> @llvm.ve.vl.vfmind.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfmind.vvvvl(<256 x double> %0, <256 x double> %1, …
  41 declare <256 x double> @llvm.ve.vl.vfmind.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
  51 %3 = tail call fast <256 x double> @llvm.ve.vl.vfmind.vsvl(double %0, <256 x double> %1, i32 256)
  56 declare <256 x double> @llvm.ve.vl.vfmind.vsvl(double, <256 x double>, i32)
  69 …%4 = tail call fast <256 x double> @llvm.ve.vl.vfmind.vsvvl(double %0, <256 x double> %1, <256 x d…
  74 declare <256 x double> @llvm.ve.vl.vfmind.vsvvl(double, <256 x double>, <256 x double>, i32)
  87 …%5 = tail call fast <256 x double> @llvm.ve.vl.vfmind.vvvmvl(<256 x double> %0, <256 x double> %1,…
  [all …]

D | vmin.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vvvl(<256 x double> %0, <256 x double> %1,…
  23 declare <256 x double> @llvm.ve.vl.vminswsx.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vvvvl(<256 x double> %0, <256 x double> %1…
  41 declare <256 x double> @llvm.ve.vl.vminswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i…
  52 %3 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
  57 declare <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32, <256 x double>, i32)
  71 …%4 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvvl(i32 %0, <256 x double> %1, <256 x do…
  76 declare <256 x double> @llvm.ve.vl.vminswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)
  86 %2 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32 8, <256 x double> %0, i32 256)
  [all …]

D | vmax.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vvvl(<256 x double> %0, <256 x double> %1,…
  23 declare <256 x double> @llvm.ve.vl.vmaxswsx.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vvvvl(<256 x double> %0, <256 x double> %1…
  41 declare <256 x double> @llvm.ve.vl.vmaxswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i…
  52 %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
  57 declare <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32, <256 x double>, i32)
  71 …%4 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvvl(i32 %0, <256 x double> %1, <256 x do…
  76 declare <256 x double> @llvm.ve.vl.vmaxswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)
  86 %2 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32 8, <256 x double> %0, i32 256)
  [all …]

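A hedged sketch pairing the scalar forms from vmin.ll and vmax.ll above into a clamp (function name is mine; these swsx intrinsics operate on signed 32-bit lanes even though the register type is spelled <256 x double>):

    ; Hypothetical example: clamp each 32-bit lane of %v to [0, 255] by
    ; taking min(255, lane) and then max(0, lane).
    define <256 x double> @clamp_example(<256 x double> %v) {
      %t = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32 255, <256 x double> %v, i32 256)
      %r = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32 0, <256 x double> %t, i32 256)
      ret <256 x double> %r
    }
    declare <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32, <256 x double>, i32)
    declare <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32, <256 x double>, i32)
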
D | vsla.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double> %0, <256 x double> %1, …
  23 declare <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvvl(<256 x double> %0, <256 x double> %1,…
  41 declare <256 x double> @llvm.ve.vl.vslawsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i3…
  52 %3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %0, i32 %1, i32 256)
  57 declare <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double>, i32, i32)
  71 …%4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double> %0, i32 %1, <256 x dou…
  76 declare <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double>, i32, <256 x double>, i32)
  86 %2 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %0, i32 8, i32 256)
  [all …]

D | vsra.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 …%3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double> %0, <256 x double> %1, …
  23 declare <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double>, <256 x double>, i32)
  36 …%4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvvl(<256 x double> %0, <256 x double> %1,…
  41 declare <256 x double> @llvm.ve.vl.vsrawsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i3…
  52 %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %0, i32 %1, i32 256)
  57 declare <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double>, i32, i32)
  71 …%4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double> %0, i32 %1, <256 x dou…
  76 declare <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double>, i32, <256 x double>, i32)
  86 %2 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %0, i32 8, i32 256)
  [all …]

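A hedged round trip pairing the scalar-shift forms from vsla.ll and vsra.ll above (function name is mine): arithmetic shift left by a scalar amount, then arithmetic shift right by the same amount.

    ; Hypothetical example using the vvsl forms: each lane is shifted by
    ; the same scalar amount %amt.
    define <256 x double> @shift_roundtrip(<256 x double> %v, i32 %amt) {
      %l = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %v, i32 %amt, i32 256)
      %r = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %l, i32 %amt, i32 256)
      ret <256 x double> %r
    }
    declare <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double>, i32, i32)
    declare <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double>, i32, i32)
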
D | vadd.ll |
  1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
  18 %3 = tail call fast <256 x double> @llvm.ve.vl.vaddsl.vsvl(i64 %0, <256 x double> %1, i32 256)
  23 declare <256 x double> @llvm.ve.vl.vaddsl.vsvl(i64, <256 x double>, i32)
  33 %2 = tail call fast <256 x double> @llvm.ve.vl.vaddsl.vsvl(i64 8, <256 x double> %0, i32 256)
  48 …%5 = tail call fast <256 x double> @llvm.ve.vl.vaddsl.vsvmvl(i64 %0, <256 x double> %1, <256 x i1>…
  53 declare <256 x double> @llvm.ve.vl.vaddsl.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i…
  66 …%4 = tail call fast <256 x double> @llvm.ve.vl.vaddsl.vsvmvl(i64 8, <256 x double> %0, <256 x i1> …
  81 …%4 = tail call fast <256 x double> @llvm.ve.vl.vaddsl.vsvvl(i64 %0, <256 x double> %1, <256 x doub…
  86 declare <256 x double> @llvm.ve.vl.vaddsl.vsvvl(i64, <256 x double>, <256 x double>, i32)
  99 …%3 = tail call fast <256 x double> @llvm.ve.vl.vaddsl.vsvvl(i64 8, <256 x double> %0, <256 x doubl…
  [all …]

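A hedged sketch of the scalar-plus-vector add above, using the fully visible _vsvvl declaration (function name is mine; reading the extra vector operand as a passthrough for inactive lanes follows the same convention as vld.vssvl earlier):

    ; Hypothetical example: add i64 scalar %s to each active lane of %v
    ; at VL 128, taking lanes 128..255 from %pt.
    define <256 x double> @add_example(i64 %s, <256 x double> %v, <256 x double> %pt) {
      %r = tail call fast <256 x double> @llvm.ve.vl.vaddsl.vsvvl(i64 %s, <256 x double> %v, <256 x double> %pt, i32 128)
      ret <256 x double> %r
    }
    declare <256 x double> @llvm.ve.vl.vaddsl.vsvvl(i64, <256 x double>, <256 x double>, i32)
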
/external/mesa3d/src/gallium/tests/graw/
D | tri-instanced.c |
  105 struct pipe_vertex_element ve[3]; in set_vertices() local
  109 memset(ve, 0, sizeof ve); in set_vertices()
  112 ve[0].src_offset = Offset(struct vertex, position); in set_vertices()
  113 ve[0].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT; in set_vertices()
  114 ve[0].vertex_buffer_index = 0; in set_vertices()
  117 ve[1].src_offset = Offset(struct vertex, color); in set_vertices()
  118 ve[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT; in set_vertices()
  119 ve[1].vertex_buffer_index = 0; in set_vertices()
  122 ve[2].src_offset = 0; in set_vertices()
  123 ve[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT; in set_vertices()
  [all …]

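Given the test's name, element 2 presumably carries per-instance data. A hedged sketch of how set_vertices() plausibly continues (the instance_divisor field and the create/bind calls come from the general gallium pipe_context interface; the ctx variable and buffer index are assumptions, not the verbatim test source):

    /* Hypothetical continuation: make the third element advance once per
     * instance instead of once per vertex, then turn the array into a
     * bound CSO. */
    ve[2].instance_divisor = 1;
    ve[2].vertex_buffer_index = 1;   /* assumed second vertex buffer */
    handle = ctx->create_vertex_elements_state(ctx, 3, ve);
    ctx->bind_vertex_elements_state(ctx, handle);
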