
Searched refs: v16i8 (Results 1 – 25 of 117) sorted by relevance


/external/llvm/test/CodeGen/PowerPC/
vaddsplat.ll
10 %v16i8 = type <16 x i8>
56 define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
57 %p = load %v16i8* %P
58 …%r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16,…
59 store %v16i8 %r, %v16i8* %S
67 define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
68 %p = load %v16i8* %P
69 …%r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -…
70 store %v16i8 %r, %v16i8* %S
126 define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
[all …]
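For orientation, vaddsplat.ll checks that adding a splatted immediate to every lane of a v16i8 works; the matched lines show the load/add/store shape with the 16-element constants truncated. A minimal sketch of one such function, with the constant written out in full for illustration (a reconstruction, not the verbatim file), in the pre-3.7 typeless-pointer IR syntax these tests use:

%v16i8 = type <16 x i8>

; illustrative reconstruction: load a v16i8, add a splat of 16 to
; every lane, and store the result
define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
  %p = load %v16i8* %P
  %r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16 >
  store %v16i8 %r, %v16i8* %S
  ret void
}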
/external/llvm/lib/Target/PowerPC/
PPCInstrAltivec.td
18 // Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
21 (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
46 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
51 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
56 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
61 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
66 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
71 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
78 (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
299 [(set v16i8:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
[all …]
/external/llvm/test/CodeGen/AArch64/
arm64-tbl.ll
13 %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %A, <16 x i8> %B)
27 %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
41 …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
55 …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
60 declare <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
62 declare <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
64 declare <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwin…
66 declare <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i…
78 %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
92 …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
[all …]
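The tbl1–tbl4 intrinsics above are AArch64 NEON table lookups: each byte of the final index operand selects a byte from the one-to-four-register table, and out-of-range indices yield zero. A minimal single-register sketch (function name assumed; the declaration is the one on in-file line 60 above):

define <16 x i8> @tbl1_example(<16 x i8> %A, <16 x i8> %B) {
  ; %A is the 16-byte table, %B supplies a byte index per lane
  %r = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %A, <16 x i8> %B)
  ret <16 x i8> %r
}

declare <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8>, <16 x i8>) nounwind readnone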
arm64-neon-across.ll
15 declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
25 declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>)
31 declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
45 declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>)
51 declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
65 declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>)
71 declare i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8>)
119 %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a)
144 %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
205 %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a)
[all …]
arm64-copy-tuple.ll
105 …%vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8…
110 …tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
113 …tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
124 …%vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16…
131 …tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
134 …tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
140 declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>*)
141 declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i…
145 declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
146 declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
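The ld3/ld4 and st3/st4 intrinsics in arm64-copy-tuple.ll move three- and four-register tuples; pairing one ld3 with one st3 means unpacking the returned struct, roughly as below (a sketch with assumed function name and pointer arguments; the intrinsic signatures are the declares quoted above):

define void @copy3_example(<16 x i8>* %src, i8* %dst) {
  ; load a 3-register tuple, unpack it, and store it back out
  %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>* %src)
  %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0
  %vec1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 1
  %vec2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 2
  tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, i8* %dst)
  ret void
}

declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>*)
declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)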
arm64-simd-scalar-to-vector.ll
13 %tmp = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) nounwind
22 declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>) nounwind readnone
arm64-vecCmpBr.ll
42 %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %0) #3
89 %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %0) #3
135 %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %0) #3
181 %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %0) #3
195 declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>) #2
199 declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>) #2
arm64-vhadd.ll
17 %tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
71 %tmp3 = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
119 declare <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
123 declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
141 %tmp3 = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
195 %tmp3 = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
243 declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
247 declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
/external/llvm/test/CodeGen/ARM/
2013-10-11-select-stalls.ll
8 %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %foo, i32 1)
9 %vld2 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1)
16 declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* , i32 )
vcnt.ll
16 %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
21 declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
51 %tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
75 declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) nounwind readnone
107 %tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
131 declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) nounwind readnone
2012-08-27-CopyPhysRegCrash.ll
8 declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32) nounwind readonly
10 declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) nounwind
22 %7 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* null, i32 1)
25 %10 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %9, i32 1)
27 %12 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %6, i32 1)
31 %16 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %15, i32 1)
114 tail call void @llvm.arm.neon.vst1.v16i8(i8* null, <16 x i8> %98, i32 1)
popcnt.ll
16 %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
75 declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
109 %tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
133 declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) nounwind readnone
165 %tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
189 declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) nounwind readnone
vabs.ll
39 %tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
72 declare <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8>) nounwind readnone
105 %tmp2 = call <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8> %tmp1)
129 declare <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8>) nounwind readnone
vhadd.ll
62 %tmp3 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
89 %tmp3 = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
119 declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
123 declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
186 %tmp3 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
213 %tmp3 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
243 declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
247 declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
/external/llvm/test/CodeGen/X86/
vshift-6.ll
8 ; This happens for example when lowering a shift left of a MVT::v16i8 vector.
12 ; B = BITCAST MVT::v16i8, A
16 ; D = BITCAST MVT::v16i8, C
22 ; Where 'r' is a vector of type MVT::v16i8, and
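These matched lines are comments from vshift-6.ll describing how the X86 backend lowers a byte-vector shift: SSE has no byte-granularity shift instruction, so the value is bitcast to a wider element type, shifted there, and bitcast back to v16i8 with fix-up masking (compare the SHL/SRA v16i8 entries in the X86 cost table below). The IR that triggers this lowering is just a plain shl; a minimal sketch (function name assumed):

define <16 x i8> @shl_v16i8(<16 x i8> %r, <16 x i8> %amt) {
  ; no byte-wise vector shift exists in SSE, so this single IR
  ; instruction expands into the bitcast/shift/bitcast sequence
  ; sketched in the comments above
  %res = shl <16 x i8> %r, %amt
  ret <16 x i8> %res
}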
/external/llvm/lib/Target/X86/
X86TargetTransformInfo.cpp
259 { ISD::SHL, MVT::v16i8, 1 }, // psllw. in getArithmeticInstrCost()
264 { ISD::SRL, MVT::v16i8, 1 }, // psrlw. in getArithmeticInstrCost()
269 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. in getArithmeticInstrCost()
314 { ISD::SHL, MVT::v16i8, 30 }, // cmpeqb sequence. in getArithmeticInstrCost()
320 { ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized. in getArithmeticInstrCost()
325 { ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized. in getArithmeticInstrCost()
336 { ISD::SDIV, MVT::v16i8, 16*20 }, in getArithmeticInstrCost()
340 { ISD::UDIV, MVT::v16i8, 16*20 }, in getArithmeticInstrCost()
470 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} in getShuffleCost()
489 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // pshufb + pshufb + or in getShuffleCost()
[all …]
/external/llvm/lib/Target/R600/
SITypeRewriter.cpp
37 Type *v16i8; member in __anon3638d9680111::SITypeRewriter
58 v16i8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16); in doInitialization()
86 if (ElemTy == v16i8) { in visitLoadInst()
111 if (Arg->getType() == v16i8) { in visitCallInst()
/external/llvm/lib/Target/ARM/
ARMCallingConv.td
28 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
47 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
61 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
80 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
96 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
147 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
157 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
175 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
190 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
ARMTargetTransformInfo.cpp
231 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, in getCastInstrCost()
232 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, in getCastInstrCost()
235 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, in getCastInstrCost()
462 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}}; in getShuffleCost()
488 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}}; in getShuffleCost()
543 { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
544 { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
545 { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
546 { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
/external/llvm/test/CodeGen/Thumb2/
v8_IT_1.ll
9 %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1)
16 declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* , i32 )
/external/llvm/include/llvm/CodeGen/
MachineValueType.h
69 v16i8 = 23, // 16 x i8 enumerator
211 return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 || in is128BitVector()
278 case v16i8: in getVectorElementType()
322 case v16i8: in getVectorNumElements()
409 case v16i8: in getSizeInBits()
522 if (NumElements == 16) return MVT::v16i8; in getVectorVT()
/external/llvm/lib/Target/AArch64/
AArch64ISelDAGToDAG.cpp
2205 else if (VT == MVT::v16i8) in Select()
2223 else if (VT == MVT::v16i8) in Select()
2241 else if (VT == MVT::v16i8) in Select()
2259 else if (VT == MVT::v16i8) in Select()
2277 else if (VT == MVT::v16i8) in Select()
2295 else if (VT == MVT::v16i8) in Select()
2313 else if (VT == MVT::v16i8) in Select()
2331 else if (VT == MVT::v16i8) in Select()
2349 else if (VT == MVT::v16i8) in Select()
2365 if (VT == MVT::v16i8 || VT == MVT::v8i8) in Select()
[all …]
AArch64CallingConvention.td
33 CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
64 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
72 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
84 CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
98 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
139 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
148 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
163 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
AArch64InstrInfo.td
1161 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
1219 defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
1309 def : Pat <(v16i8 (scalar_to_vector (i32
1311 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
1361 def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1523 def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1834 defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
1902 def : Pat<(store (v16i8 FPR128:$Rt),
1991 def : Pat<(store (v16i8 FPR128:$Rt),
2083 def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
[all …]
/external/llvm/test/CodeGen/Mips/msa/
basic_operations.ll
5 @v16i8 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, …
15 … 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8>*@v16i8
18 … 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>*@v16i8
21 …1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8
25 … 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8
29 … 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8
33 … 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, <16 x i8>*@v16i8
40 … 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
184 store volatile <16 x i8> %16, <16 x i8>*@v16i8
261 %1 = load <16 x i8>* @v16i8
[all …]
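basic_operations.ll exercises MSA constant materialization by storing assorted byte patterns to the @v16i8 global and loading them back. A minimal sketch of one such store/load pair in the same style (illustrative only; the original constants are truncated above):

@v16i8 = global <16 x i8> zeroinitializer

define void @splat_v16i8_example() {
  ; write a splat of 1 into the global (cf. in-file line 18 above),
  ; then reload it (cf. in-file line 261)
  store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>* @v16i8
  %0 = load <16 x i8>* @v16i8
  ret void
}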
