
Searched refs: v4i16 (Results 1 – 25 of 368), sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/AArch64/
vector-reduce.ll
22 ; COST-LABEL: add.i16.v4i16
23 …st of 1 for instruction: %r = call i16 @llvm.experimental.vector.reduce.add.i16.v4i16(<4 x i16> %v)
24 ; CODE-LABEL: add.i16.v4i16
26 define i16 @add.i16.v4i16(<4 x i16> %v) {
27 %r = call i16 @llvm.experimental.vector.reduce.add.i16.v4i16(<4 x i16> %v)
67 ; COST-LABEL: umin.i16.v4i16
68 … of 58 for instruction: %r = call i16 @llvm.experimental.vector.reduce.umin.i16.v4i16(<4 x i16> %v)
69 ; CODE-LABEL: umin.i16.v4i16
71 define i16 @umin.i16.v4i16(<4 x i16> %v) {
72 %r = call i16 @llvm.experimental.vector.reduce.umin.i16.v4i16(<4 x i16> %v)
[all …]
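As a point of reference, the shape of IR these cost-model hits exercise can be reproduced as a tiny standalone module. This is only a sketch assembled from the matched lines above (the ret and declare lines are the obvious completions; the intrinsic name is the llvm-7.0-era spelling, renamed to llvm.vector.reduce.add in later releases):

  ; sketch: minimal function mirroring add.i16.v4i16 from vector-reduce.ll above
  define i16 @add.i16.v4i16(<4 x i16> %v) {
    %r = call i16 @llvm.experimental.vector.reduce.add.i16.v4i16(<4 x i16> %v)
    ret i16 %r
  }
  declare i16 @llvm.experimental.vector.reduce.add.i16.v4i16(<4 x i16>)

Running opt's cost-model analysis over such a module is what produces the "cost of 1 for instruction" line matched above; the umin variant in the same file is matched with a much higher cost (58) on this target.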
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/AArch64/
AArch64GenDAGISel.inc
533 /* 1014*/ OPC_CheckChild0Type, MVT::v4i16,
555 …// Src: (st (vector_extract:{ *:[i32] } VecListOne64:{ *:[v4i16] }:$Vt, (imm:{ *:[i64] })<<P:Predi…
556 …Dst: (ST1i16 (SUBREG_TO_REG:{ *:[f128] } 0:{ *:[i32] }, VecListOne64:{ *:[v4i16] }:$Vt, dsub:{ *:[…
1110 /* 2233*/ OPC_CheckChild1Type, MVT::v4i16,
1120 …// Src: (st FPR64:{ *:[v4i16] }:$Rt, (ro_Windexed64:{ *:[iPTR] } GPR64sp:{ *:[i64] }:$Rn, GPR32:{ …
1121 …// Dst: (STRDroW FPR64:{ *:[v4i16] }:$Rt, GPR64sp:{ *:[i64] }:$Rn, GPR32:{ *:[i32] }:$Rm, ro_Wexte…
1127 …// Src: (st FPR64:{ *:[v4i16] }:$Rt, (ro_Xindexed64:{ *:[iPTR] } GPR64sp:{ *:[i64] }:$Rn, GPR64:{ …
1128 …// Dst: (STRDroX FPR64:{ *:[v4i16] }:$Rt, GPR64sp:{ *:[i64] }:$Rn, GPR64:{ *:[i64] }:$Rm, ro_Xexte…
1145 …// Src: (st FPR64:{ *:[v4i16] }:$Rt, (am_indexed7s32:{ *:[iPTR] } GPR64sp:{ *:[i64] }:$Rn, simm7s4…
1146 …:[i32] } FPR64:{ *:[v4i16] }:$Rt, ssub:{ *:[i32] }), (CPYi32:{ *:[i32] } (SUBREG_TO_REG:{ *:[f128]…
[all …]
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/ARM/
ARMGenDAGISel.inc
5116 /* 10419*/ OPC_CheckChild1Type, MVT::v4i16,
5118 /* 10422*/ OPC_CheckChild2Type, MVT::v4i16,
5127 …zext:{ *:[v4i32] } (intrinsic_wo_chain:{ *:[v4i16] } 1016:{ *:[iPTR] }, DPR:{ *:[v4i16] }:$Vn, DPR…
5128 …: (VABALsv4i32:{ *:[v4i32] } QPR:{ *:[v4i32] }:$src1, DPR:{ *:[v4i16] }:$Vn, DPR:{ *:[v4i16] }:$Vm)
5162 /* 10513*/ OPC_CheckChild1Type, MVT::v4i16,
5164 /* 10516*/ OPC_CheckChild2Type, MVT::v4i16,
5173 …zext:{ *:[v4i32] } (intrinsic_wo_chain:{ *:[v4i16] } 1017:{ *:[iPTR] }, DPR:{ *:[v4i16] }:$Vn, DPR…
5174 …: (VABALuv4i32:{ *:[v4i32] } QPR:{ *:[v4i32] }:$src1, DPR:{ *:[v4i16] }:$Vn, DPR:{ *:[v4i16] }:$Vm)
5216 /* 10621*/ OPC_CheckChild1Type, MVT::v4i16,
5218 /* 10624*/ OPC_CheckChild2Type, MVT::v4i16,
[all …]
/external/llvm/lib/Target/Hexagon/
HexagonInstrInfoVector.td
20 def V4I16: PatLeaf<(v4i16 DoubleRegs:$R)>;
42 defm : bitconvert_64<v4i16, i64>;
55 [(set (v4i16 DoubleRegs:$dst),
56 (Op (v4i16 DoubleRegs:$src1), u4ImmPred:$src2))]> {
93 def: Pat<(v4i16 (HexagonVSPLATH I32:$Rs)), (S2_vsplatrh I32:$Rs)>;
131 def: Pat<(v4i16 (sra V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
133 def: Pat<(v4i16 (srl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
135 def: Pat<(v4i16 (shl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
142 [SDTCisSameAs<0, 1>, SDTCisVT<0, v4i16>, SDTCisInt<2>]>;
153 def: Pat<(v4i16 (HexagonVSRAH V4I16:$Rs, u4ImmPred:$u4)),
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
vqshrn.ll
15 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -1…
39 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -1…
63 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
76 declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
80 declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
84 declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
99 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
123 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
147 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 …
160 declare <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
[all …]
vpadd.ll
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
40 declare <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
49 %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
57 %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
73 %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %tmp1)
81 %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
155 declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
156 declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) nounwind readnone
159 declare <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8>) nounwind readnone
160 declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
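For readers unfamiliar with the intrinsic, the vpadd.ll hits boil down to the following standalone function. This is a sketch only: the real test loads its operands through pointers, and the function name here is made up.

  ; sketch: pairwise add of two v4i16 values, as in vpadd.ll above
  define <4 x i16> @vpadd_v4i16_sketch(<4 x i16> %a, <4 x i16> %b) {
    %r = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %a, <4 x i16> %b)
    ret <4 x i16> %r
  }
  declare <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone

On an ARM target with NEON this should select to a single vpadd.i16 on d registers, which is presumably what the test's CHECK lines (not shown here) look for.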
vpadal.ll
8 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
35 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
44 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
111 declare <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
112 declare <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
115 declare <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
116 declare <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
vpminmax.ll
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
44 %tmp3 = call <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
67 declare <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
71 declare <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
90 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
117 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
140 declare <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
144 declare <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
vqshrn.ll
15 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -1…
39 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -1…
63 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
76 declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
80 declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
84 declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
99 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
123 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
147 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 …
160 declare <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
[all …]
neon-v8.1a.ll
6 declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
11 declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>)
16 declare <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16>, <4 x i16>)
23 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %mhs, <4 x i16> %rhs)
24 %retval = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %acc, <4 x i16> %prod)
55 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %mhs, <4 x i16> %rhs)
56 %retval = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %acc, <4 x i16> %prod)
92 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %x, <4 x i16> %shuffle)
93 %retval = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %acc, <4 x i16> %prod)
132 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %x, <4 x i16> %shuffle)
[all …]
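The neon-v8.1a.ll hits chain two of the declared intrinsics: a rounding-doubling multiply followed by a saturating add. A reduced sketch of that shape (the intrinsic calls and value names are taken from the matched lines, the rest is assumed):

  ; sketch: vqrdmulh followed by vqadds on v4i16, the pattern v8.1-A targets fold into vqrdmlah
  define <4 x i16> @vqrdmlah_sketch(<4 x i16> %acc, <4 x i16> %mhs, <4 x i16> %rhs) {
    %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %mhs, <4 x i16> %rhs)
    %retval = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %acc, <4 x i16> %prod)
    ret <4 x i16> %retval
  }
  declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
  declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>)

The vqsubs-based lines matched above (55–56 in the file) are the corresponding subtract form of the same pattern.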
vpadal.ll
8 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
35 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
44 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
111 declare <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
112 declare <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
115 declare <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
116 declare <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
vpminmax.ll
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
44 %tmp3 = call <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
67 declare <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
71 declare <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
90 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
117 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
140 declare <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
144 declare <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
/external/llvm/test/CodeGen/ARM/
vqshrn.ll
15 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -1…
39 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -1…
63 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
76 declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
80 declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
84 declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
99 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
123 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -…
147 …%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 …
160 declare <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
[all …]
neon-v8.1a.ll
6 declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
11 declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>)
16 declare <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16>, <4 x i16>)
23 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %mhs, <4 x i16> %rhs)
24 %retval = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %acc, <4 x i16> %prod)
55 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %mhs, <4 x i16> %rhs)
56 %retval = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %acc, <4 x i16> %prod)
92 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %x, <4 x i16> %shuffle)
93 %retval = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %acc, <4 x i16> %prod)
132 %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %x, <4 x i16> %shuffle)
[all …]
vpadd.ll
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
40 declare <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
49 %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
57 %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
73 %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %tmp1)
81 %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
166 declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
167 declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) nounwind readnone
170 declare <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8>) nounwind readnone
171 declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
vpadal.ll
8 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
17 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
35 %tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
44 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
111 declare <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
112 declare <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
115 declare <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
116 declare <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
vpminmax.ll
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
44 %tmp3 = call <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
67 declare <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
71 declare <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
90 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
117 %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
140 declare <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
144 declare <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
/external/swiftshader/third_party/LLVM/lib/Target/ARM/
ARMCallingConv.td
31 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
49 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
63 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
75 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
91 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
138 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
148 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
163 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
175 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
/external/clang/test/CodeGen/
ppc64-vector.c
5 typedef short v4i16 __attribute__((vector_size (8)));
25 v4i16 test_v4i16(v4i16 x) in test_v4i16()
/external/llvm/lib/Target/ARM/
ARMTargetTransformInfo.cpp
105 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 }, in getCastInstrCost()
106 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 }, in getCastInstrCost()
110 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, in getCastInstrCost()
113 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, in getCastInstrCost()
114 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, in getCastInstrCost()
142 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
143 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
157 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, in getCastInstrCost()
158 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, in getCastInstrCost()
396 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2}, in getShuffleCost()
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/ARM/
ARMTargetTransformInfo.cpp
166 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 }, in getCastInstrCost()
167 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 }, in getCastInstrCost()
171 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, in getCastInstrCost()
174 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, in getCastInstrCost()
175 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, in getCastInstrCost()
203 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
204 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, in getCastInstrCost()
218 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, in getCastInstrCost()
219 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, in getCastInstrCost()
441 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2}, in getShuffleCost()
[all …]
/external/llvm/test/Bitcode/
arm32_neon_vcnt_upgrade.ll
8 %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
9 ;CHECK: {{call.*@llvm.ctlz.v4i16\(<4 x i16>.*, i1 false}}
21 declare <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16>) nounwind readnone
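The arm32_neon_vcnt_upgrade.ll hits document the bitcode auto-upgrade path: IR written against the legacy NEON count-leading-zeros intrinsic,

  %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)

is rewritten on load into the generic counterpart that the CHECK line above matches,

  %tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 false)

where the extra i1 false operand means the result is still defined for a zero input.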
/external/swiftshader/third_party/llvm-7.0/llvm/test/Bitcode/
arm32_neon_vcnt_upgrade.ll
8 %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
9 ;CHECK: {{call.*@llvm.ctlz.v4i16\(<4 x i16>.*, i1 false}}
21 declare <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16>) nounwind readnone
/external/llvm/test/CodeGen/AArch64/
arm64-neon-across.ll
17 declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
33 declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>)
37 declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
53 declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>)
57 declare i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>)
73 declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>)
77 declare i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16>)
94 %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a)
111 %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
178 %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a)
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
arm64-neon-across.ll
17 declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
33 declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>)
37 declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
53 declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>)
57 declare i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>)
73 declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>)
77 declare i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16>)
94 %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a)
111 %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
178 %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a)
[all …]
