/external/llvm/test/CodeGen/CellSPU/useful-harnesses/ |
D | vecoperations.c |
      3  typedef unsigned char v16i8 __attribute__((ext_vector_type(16))); typedef
     10  void print_v16i8(const char *str, const v16i8 v) {   in print_v16i8()
     13  v16i8 vec;   in print_v16i8()
     24  void print_v16i8_hex(const char *str, const v16i8 v) {   in print_v16i8_hex()
     27  v16i8 vec;   in print_v16i8_hex()
     68  v16i8 v16i8_mpy(v16i8 v1, v16i8 v2) {   in v16i8_mpy()
     72  v16i8 v16i8_add(v16i8 v1, v16i8 v2) {   in v16i8_add()
    137  v16i8 v00 = { 0xf4, 0xad, 0x01, 0xe9, 0x51, 0x78, 0xc1, 0x8a,   in main()
    139  v16i8 va0 = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,   in main()
    141  v16i8 va1 = { 0x11, 0x83, 0x4b, 0x63, 0xff, 0x90, 0x32, 0xe5,   in main()
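Note: vecoperations.c is a standalone harness for 16 x i8 vector math built on clang's ext_vector_type extension. As a rough, self-contained sketch of the shape of such a harness (the typedef and the signatures match the lines quoted above; the function bodies and main() below are assumptions for illustration, not the file's actual code):

    #include <stdio.h>

    /* 16 x i8 vector type, as declared on line 3 of the harness. */
    typedef unsigned char v16i8 __attribute__((ext_vector_type(16)));

    /* Hex dump of a v16i8, in the spirit of print_v16i8_hex(). */
    void print_v16i8_hex(const char *str, const v16i8 v) {
      printf("%s = {", str);
      for (int i = 0; i < 16; ++i)
        printf(" 0x%02x", (unsigned)v[i]);
      printf(" }\n");
    }

    /* Element-wise byte add; clang lowers the '+' lane-wise. */
    v16i8 v16i8_add(v16i8 v1, v16i8 v2) {
      return v1 + v2;
    }

    int main(void) {
      v16i8 va0 = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };
      print_v16i8_hex("va0 + va0", v16i8_add(va0, va0));
      return 0;
    }

This compiles with clang (ext_vector_type is a clang extension); the printed lanes should read 0x02 through 0x20.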
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
     18  // Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
     21  (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
     42  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     46  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     50  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     54  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     58  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     62  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     68  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
    297  (vsldoi_shuffle:$SH (v16i8 VRRC:$vA), VRRC:$vB))]>;
    [all …]
|
D | PPCCallingConv.td |
     32  CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToReg<[V2]>>
     50  CCIfType<[v16i8, v8i16, v4i32, v4f32],
     59  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
     94  CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToStack<16, 16>>
    108  CCIfType<[v16i8, v8i16, v4i32, v4f32],
|
/external/llvm/lib/Target/CellSPU/ |
D | CellSDKIntrinsics.td |
    124  [(set (v16i8 VECREG:$rT),
    125  (int_spu_si_andbi (v16i8 VECREG:$rA), immU8:$val))]>;
    154  [(set (v16i8 VECREG:$rT),
    155  (int_spu_si_orbi (v16i8 VECREG:$rA), immU8:$val))]>;
    178  [(set (v16i8 VECREG:$rT), (int_spu_si_xorbi (v16i8 VECREG:$rA), immU8:$val))]>;
    248  [(set (v16i8 VECREG:$rT),
    249  (int_spu_si_ceqb (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;
    254  [(set (v16i8 VECREG:$rT), (int_spu_si_ceqbi (v16i8 VECREG:$rA), immU8:$val))]>;
    294  [(set (v16i8 VECREG:$rT),
    295  (int_spu_si_cgtb (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;
    [all …]
|
D | SPUInstrInfo.td |
     60  def v16i8: LoadDFormVec<v16i8>;
     92  def v16i8: LoadAFormVec<v16i8>;
    124  def v16i8: LoadXFormVec<v16i8>;
    172  def v16i8: StoreDFormVec<v16i8>;
    202  def v16i8: StoreAFormVec<v16i8>;
    234  def v16i8: StoreXFormVec<v16i8>;
    267  [(set (v16i8 VECREG:$rT), (SPUshufmask dform2_addr:$src))]>;
    271  [(set (v16i8 VECREG:$rT), (SPUshufmask xform_addr:$src))]>;
    469  def v16i8: FSMBIVec<v16i8>;
    492  def v16i8_r16: FSMBRegInst<R16C, v16i8>;
    [all …]
|
D | SPUCallingConv.td |
     24  CCIfType<[i8,i16,i32,i64,i128,f32,f64,v16i8,v8i16,v4i32,v2i64,v4f32,v2f64],
     41  v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
     55  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
|
D | SPUMathInstr.td |
     14  // v16i8 multiply instruction sequence:
     17  def : Pat<(mul (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)),
|
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
     32  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     50  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     64  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     76  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    120  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    130  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    145  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    157  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
|
D | ARMInstrNEON.td |
     564  def VLD1LNq8Pseudo : VLD1QLNPseudo<v16i8, extloadi8>;
     838  def VLD1DUPq8Pseudo : VLD1QDUPPseudo<v16i8, extloadi8>;
    1414  def VST1LNq8Pseudo : VST1QLNPseudo<v16i8, truncstorei8, NEONvgetlaneu>;
    1454  def VST1LNq8Pseudo_UPD : VST1QLNWBPseudo<v16i8, post_truncsti8, NEONvgetlaneu>;
    2434  def v16i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 1, op4,
    2437  [(set QPR:$Vd, (v16i8 (OpNode (v16i8 QPR:$Vm))))]>;
    2470  def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
    2471  itinQ, OpcodeStr, !strconcat(Dt, "8"), v16i8,v16i8,IntOp>;
    2547  def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, itinQ16,
    2549  v16i8, v16i8, OpNode, Commutable>;
    [all …]
|
/external/llvm/test/CodeGen/ARM/ |
D | vcnt.ll |
     15  %tmp2 = call <16 x i8> @llvm.arm.neon.vcnt.v16i8(<16 x i8> %tmp1)
     20  declare <16 x i8> @llvm.arm.neon.vcnt.v16i8(<16 x i8>) nounwind readnone
     50  %tmp2 = call <16 x i8> @llvm.arm.neon.vclz.v16i8(<16 x i8> %tmp1)
     74  declare <16 x i8> @llvm.arm.neon.vclz.v16i8(<16 x i8>) nounwind readnone
    106  %tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
    130  declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) nounwind readnone
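Note: this test pins down the three lane-wise bit-counting intrinsics on <16 x i8>. As a minimal sketch of the per-lane semantics being tested, written as scalar C reference functions (the function names are mine, not from the test):

    #include <stdint.h>

    /* vcnt.8: population count of each byte. */
    static uint8_t cnt8(uint8_t x) {
      uint8_t n = 0;
      for (; x; x >>= 1)
        n += x & 1;
      return n;
    }

    /* vclz.i8: number of leading zero bits in each byte (8 for x == 0). */
    static uint8_t clz8(uint8_t x) {
      uint8_t n = 0;
      for (uint8_t m = 0x80; m && !(x & m); m >>= 1)
        ++n;
      return n;
    }

    /* vcls.s8: number of bits after the sign bit that equal it, e.g.
       cls8(0x40) == 0, cls8(0) == 7, cls8(-1) == 7. */
    static uint8_t cls8(int8_t x) {
      return clz8((uint8_t)(x < 0 ? ~x : x)) - 1;
    }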
|
D | vhadd.ll |
     62  %tmp3 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
     89  %tmp3 = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    119  declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    123  declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    186  %tmp3 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    213  %tmp3 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    243  declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    247  declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
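Note: vhadd is the halving add, where each pair of lanes is added in a wider type and the sum shifted right one bit, so the result never wraps; vrhadd rounds instead of truncating. A scalar C sketch of one signed i8 lane (function names are mine):

    #include <stdint.h>

    /* One lane of vhadd.s8: truncating halving add, (a + b) >> 1. */
    static int8_t hadds8(int8_t a, int8_t b) {
      return (int8_t)(((int16_t)a + (int16_t)b) >> 1);
    }

    /* One lane of vrhadd.s8: rounding halving add, (a + b + 1) >> 1. */
    static int8_t rhadds8(int8_t a, int8_t b) {
      return (int8_t)(((int16_t)a + (int16_t)b + 1) >> 1);
    }

The unsigned variants (vhaddu/vrhaddu) are the same computation on uint8_t lanes widened to uint16_t.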
|
D | vabs.ll |
     39  %tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
     72  declare <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8>) nounwind readnone
    105  %tmp2 = call <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8> %tmp1)
    129  declare <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8>) nounwind readnone
|
D | vhsub.ll |
     62  %tmp3 = call <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
     89  %tmp3 = call <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    119  declare <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    123  declare <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
|
D | vminmax.ll |
     71  %tmp3 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
     98  %tmp3 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    139  declare <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    143  declare <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    217  %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    244  %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    285  declare <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    289  declare <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
|
D | vpadal.ll |
     62  %tmp3 = call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
     89  %tmp3 = call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
    119  declare <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
    123  declare <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
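Note: vpadal ("pairwise add and accumulate long") takes operands of two different vector types, which is why the intrinsic carries the double suffix v8i16.v16i8: it widens adjacent i8 pairs, adds each pair, and accumulates into i16 lanes. A scalar C sketch of the signed semantics (function name is mine; i16 wraparound, which the instruction defines as modular, is ignored here):

    #include <stdint.h>

    /* vpadal.s8 -> s16 semantics: acc[i] += v[2i] + v[2i+1], widened. */
    static void padal_s8_s16(int16_t acc[8], const int8_t v[16]) {
      for (int i = 0; i < 8; ++i)
        acc[i] += (int16_t)v[2 * i] + (int16_t)v[2 * i + 1];
    }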
|
D | vqshl.ll |
     80  %tmp3 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    116  %tmp3 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    247  …%tmp2 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7…
    279  …%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7…
    311  …%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 …
    354  declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    359  declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    364  declare <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    446  %tmp3 = call <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    482  %tmp3 = call <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    [all …]
|
D | vpadd.ll |
     97  %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
    121  %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
    163  declare <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8>) nounwind readnone
    167  declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) nounwind readnone
|
D | vshl.ll |
     80  %tmp3 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    116  %tmp3 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    186  …%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7,…
    284  …%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 …
    316  …%tmp2 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 …
    354  declare <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    359  declare <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    441  %tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    477  %tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    576  …%tmp2 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8…
    [all …]
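Note: the register form of NEON vshl takes a per-lane signed shift count, with positive counts shifting left and negative counts shifting right, which is why the constant-vector operands in this test include both < i8 7, ... > and < i8 -8, ... >. A scalar C sketch of one signed lane, valid for in-range counts in [-8, 7] (name is mine; the out-of-range cases are defined separately by the architecture):

    #include <stdint.h>

    /* One lane of vshl.s8 by register, for counts in [-8, 7]. */
    static int8_t vshifts8(int8_t a, int8_t count) {
      if (count >= 0)   /* positive count: left shift */
        return (int8_t)(uint8_t)((uint8_t)a << count);
      /* negative count: arithmetic right shift (implementation-defined in
         ISO C for negative values, arithmetic on mainstream compilers) */
      return (int8_t)(a >> -count);
    }

The vrshifts/vrshiftu calls exercise vrshl, which adds a rounding constant before a right shift.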
|
D | vqsub.ll |
     80  %tmp3 = call <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    116  %tmp3 = call <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    157  declare <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    162  declare <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
|
D | vqadd.ll |
     80  %tmp3 = call <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    116  %tmp3 = call <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    157  declare <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    162  declare <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
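Note: vqadd saturates instead of wrapping, so sums beyond a lane's range clamp to the minimum or maximum representable value. A scalar C sketch of the i8 lane semantics (function names are mine):

    #include <stdint.h>

    /* One lane of vqadd.s8: signed saturating add. Widening to i16 means
       the intermediate sum cannot overflow, so it can simply be clamped. */
    static int8_t qadds8(int8_t a, int8_t b) {
      int16_t s = (int16_t)a + (int16_t)b;
      if (s > INT8_MAX) return INT8_MAX;
      if (s < INT8_MIN) return INT8_MIN;
      return (int8_t)s;
    }

    /* One lane of vqadd.u8: unsigned saturating add. */
    static uint8_t qaddu8(uint8_t a, uint8_t b) {
      uint16_t s = (uint16_t)(a + b);
      return s > UINT8_MAX ? (uint8_t)UINT8_MAX : (uint8_t)s;
    }

vqsub (the previous file) is the mirror image, clamping on underflow instead.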
|
D | vld1.ll |
     72  %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
     81  %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
    126  declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32) nounwind readonly
|
D | vabd.ll |
     71  %tmp3 = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
     98  %tmp3 = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    139  declare <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    143  declare <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
|
/external/llvm/include/llvm/CodeGen/ |
D | ValueTypes.h |
     57  v16i8 = 15,   // 16 x i8   enumerator
    193  case v16i8:   in getVectorElementType()
    219  case v16i8:   in getVectorNumElements()
    272  case v16i8:   in getSizeInBits()
    342  if (NumElements == 16) return MVT::v16i8;   in getVectorVT()
    436  case 16: return MVT::v16i8;   in getIntVectorWithNumElements()
    481  return (V==MVT::v16i8 || V==MVT::v8i16 || V==MVT::v4i32 ||   in is128BitVector()
|
/external/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
     40  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    157  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    175  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
    194  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
    213  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    235  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    273  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    282  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
|
/external/llvm/test/CodeGen/X86/ |
D | widen_cast-4.ll |
     11  ; v8i8 that is widen to v16i8 then split
     12  ; FIXME: This is widen to v16i8 and split to 16 and we then rebuild the vector.
|