/external/llvm/test/Analysis/BasicAA/ |
D | intrinsics.ll |
    10   ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    11   ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
    15   %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    16   call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
    17   %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    25   ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    26   ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
    31   %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    32   call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
    33   %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    [all …]
|
/external/llvm/lib/Target/CellSPU/ |
D | CellSDKIntrinsics.td |
    43   [(set (v4i32 VECREG:$rT), (int_spu_si_mpy (v8i16 VECREG:$rA),
    44       (v8i16 VECREG:$rB)))]>;
    49   [(set (v4i32 VECREG:$rT), (int_spu_si_mpyu (v8i16 VECREG:$rA),
    50       (v8i16 VECREG:$rB)))] >;
    55   [(set (v4i32 VECREG:$rT), (int_spu_si_mpyi (v8i16 VECREG:$rA),
    61   [(set (v4i32 VECREG:$rT), (int_spu_si_mpyui (v8i16 VECREG:$rA),
    67   [(set (v4i32 VECREG:$rT), (int_spu_si_mpya (v8i16 VECREG:$rA),
    68       (v8i16 VECREG:$rB),
    69       (v8i16 VECREG:$rC)))]>;
    75       (v8i16 VECREG:$rB)))]>;
    [all …]
|
D | SPUInstrInfo.td |
    61   def v8i16: LoadDFormVec<v8i16>;
    93   def v8i16: LoadAFormVec<v8i16>;
    125  def v8i16: LoadXFormVec<v8i16>;
    173  def v8i16: StoreDFormVec<v8i16>;
    203  def v8i16: StoreAFormVec<v8i16>;
    235  def v8i16: StoreXFormVec<v8i16>;
    275  [(set (v8i16 VECREG:$rT), (SPUshufmask dform2_addr:$src))]>;
    279  [(set (v8i16 VECREG:$rT), (SPUshufmask xform_addr:$src))]>;
    320  [(set (v8i16 VECREG:$rT), (v8i16 v8i16SExt16Imm:$val))]>;
    470  def v8i16: FSMBIVec<v8i16>;
    [all …]
|
D | SPUCallingConv.td |
    20   CCIfType<[i8,i16,i32,i64,i128,f32,f64,v16i8,v8i16,v4i32,v2i64,v4f32,v2f64],
    37       v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
    51   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
|
D | SPUInstrFormats.td |
    256  // RI10 Format for v8i16 intrinsics
    261  [(set (v8i16 VECREG:$rT), (IntID (v8i16 VECREG:$rA),
    271  // RR Format for v8i16 intrinsics
    276  [(set (v8i16 VECREG:$rT), (IntID (v8i16 VECREG:$rA),
    277      (v8i16 VECREG:$rB)))] >;
|
D | SPUMathInstr.td |
    33   // v8i16 multiply instruction sequence:
    36   def : Pat<(mul (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)),
|
/external/llvm/test/Analysis/TypeBasedAliasAnalysis/ |
D | intrinsics.ll |
    10   ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    11   ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
    15   %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind, !tbaa !2
    16   call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16), !tbaa !1
    17   %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind, !tbaa !2
    22   declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
    23   declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
|
/external/llvm/test/CodeGen/ARM/ |
D | 2012-08-27-CopyPhysRegCrash.ll |
    43   %28 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %26) nounwind
    46   %31 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> undef, <8 x i16> %30) nounwind
    57   %42 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %36) nounwind
    58   %43 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %41) nounwind
    62   %47 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %31, <8 x i16> %46) nounwind
    80   %65 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %60) nounwind
    87   %72 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> undef) nounwind
    88   %73 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %71) nounwind
    96   %81 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %80) nounwind
    99   %84 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %76, <8 x i16> %83) nounwind
    [all …]
|
D | vpadal.ll |
    62   %tmp3 = call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
    71   %tmp3 = call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
    89   %tmp3 = call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
    98   %tmp3 = call <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
    119  declare <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
    120  declare <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
    123  declare <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
    124  declare <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
|
D | vpadd.ll |
    97   %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
    105  %tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
    121  %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
    129  %tmp2 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %tmp1)
    163  declare <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8>) nounwind readnone
    164  declare <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16>) nounwind readnone
    167  declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) nounwind readnone
    168  declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) nounwind readnone
|
D | vshll.ll |
    7    …%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, …
    31   …%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, …
    57   …%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 8, i8 8, i8 8, …
    77   declare <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
    81   declare <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
|
D | 2011-08-12-vmovqqqq-pseudo.ll |
    7    …%vld3_lane = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8* undef, <8 …
    12   declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>…
|
D | vhadd.ll |
    71   %tmp3 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    98   %tmp3 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    120  declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    124  declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    195  %tmp3 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    222  %tmp3 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    244  declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    248  declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|
D | vabs.ll |
    47   %tmp2 = call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %tmp1)
    73   declare <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16>) nounwind readnone
    113  %tmp2 = call <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16> %tmp1)
    130  declare <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16>) nounwind readnone
|
D | vcnt.ll |
    59   %tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
    76   declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
    115  %tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
    132  declare <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16>) nounwind readnone
|
D | vhsub.ll |
    71   %tmp3 = call <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    98   %tmp3 = call <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    120  declare <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    124  declare <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|
D | vminmax.ll |
    80   %tmp3 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    107  %tmp3 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    140  declare <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    144  declare <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    226  %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    253  %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    286  declare <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    290  declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|
D | reg_sequence.ll |
    27   %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
    41   tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17, i32 1)
    61   %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
    64   %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7, i32 1) ; <<8 x i16>> [#uses=1]
    68   tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9, i32 1)
    71   tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10, i32 1)
    149  …%tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, …
    327  declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
    333  declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
    344  declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i3…
    [all …]
|
D | vmul.ll |
    166  %tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
    226  %tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
    275  %tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
    359  declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
    363  declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
    367  declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
    462  tail call void @llvm.arm.neon.vst1.v8i16(i8* %14, <8 x i16> %13, i32 2)
    468  declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
    566  …%vqsub2.i216 = tail call <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16> <i16 256, i16 256, i16 25…
    581  …%vrshr_n160 = tail call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i188, <8 x i16> <i1…
    [all …]
|
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
    28   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    47   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    61   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    73   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    89   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    140  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    150  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    168  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    180  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
|
D | ARMInstrNEON.td |
    1025  def VLD1LNq16Pseudo : VLD1QLNPseudo<v8i16, extloadi16>;
    1320  def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16", v8i16, extloadi16>;
    1993  def VST1LNq16Pseudo : VST1QLNPseudo<v8i16, truncstorei16, NEONvgetlaneu>;
    2035  def VST1LNq16Pseudo_UPD : VST1QLNWBPseudo<v8i16, post_truncsti16,NEONvgetlaneu>;
    3068  def v8i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 1, op4,
    3071  [(set QPR:$Vd, (v8i16 (OpNode (v8i16 QPR:$Vm))))]>;
    3102  def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
    3103  itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
    3117  v8i8, v8i16, OpNode>;
    3134  v8i8, v8i16, IntOp>;
    [all …]
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
    305  [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
    420  [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
    486  [(set VRRC:$vD, (v8i16 vecspltish:$SIMM))]>;
    548  def VCMPEQUH : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
    549  def VCMPEQUHo : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
    550  def VCMPGTSH : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
    551  def VCMPGTSHo : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
    552  def VCMPGTUH : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>;
    553  def VCMPGTUHo : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;
    604  def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
    [all …]
|
D | PPCCallingConv.td |
    28   CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToReg<[V2]>>
    46   CCIfType<[v16i8, v8i16, v4i32, v4f32],
    55   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    90   CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToStack<16, 16>>
    104  CCIfType<[v16i8, v8i16, v4i32, v4f32],
|
/external/llvm/test/CodeGen/CellSPU/useful-harnesses/ |
D | vecoperations.c |
    4    typedef short v8i16 __attribute__((ext_vector_type(16))); typedef
    38   void print_v8i16_hex(const char *str, v8i16 v) { in print_v8i16_hex()
    41   v8i16 vec; in print_v8i16_hex()
    143  v8i16 v01 = { 0x1a87, 0x0a14, 0x5014, 0xfff0, in main()
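Note on the typedef matched at line 4: clang's ext_vector_type attribute takes the element count, so ext_vector_type(16) on short actually declares a 16 x i16 vector despite the v8i16 name. A minimal sketch (assuming clang's vector extension; not code from the harness above) of a typedef that does lower to LLVM's v8i16:

    /* Sketch only: ext_vector_type's argument is the ELEMENT COUNT, so an
     * 8 x i16 vector -- LLVM's v8i16 -- is declared with 8, not 16. */
    typedef short v8i16 __attribute__((ext_vector_type(8)));

    /* Element-wise arithmetic on the typedef lowers to <8 x i16> IR ops. */
    v8i16 double_elems(v8i16 v) {
      return v + v; /* becomes an "add <8 x i16>" in LLVM IR */
    }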
|
/external/llvm/include/llvm/CodeGen/ |
D | ValueTypes.h |
    66   v8i16 = 20, // 8 x i16 enumerator
    187  return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 || in is128BitVector()
    243  case v8i16: in getVectorElementType()
    273  case v8i16: in getVectorNumElements()
    327  case v8i16: in getSizeInBits()
    406  if (NumElements == 8) return MVT::v8i16; in getVectorVT()
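The ValueTypes.h matches above are the switch arms that give v8i16 its properties: element type i16, 8 elements, 128 bits. A minimal sketch (assuming this revision's MVT API; the helper name is hypothetical) of querying those properties from client code:

    #include <cassert>
    #include "llvm/CodeGen/ValueTypes.h"
    using namespace llvm;

    // Hypothetical helper: exercises the accessors whose switch
    // statements are matched above.
    static void describeV8i16() {
      MVT VT = MVT::v8i16;
      assert(VT.getVectorElementType() == MVT::i16); // case v8i16 in getVectorElementType()
      assert(VT.getVectorNumElements() == 8);        // case v8i16 in getVectorNumElements()
      assert(VT.getSizeInBits() == 128);             // case v8i16 in getSizeInBits()
      assert(VT.is128BitVector());                   // fits one NEON Q / Altivec register
      // getVectorVT() is the inverse mapping: (i16, 8 elements) -> v8i16.
      assert(MVT::getVectorVT(MVT::i16, 8) == MVT::v8i16);
    }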
|