/external/llvm/test/CodeGen/Hexagon/vect/ |
D | vect-bad-bitcast.ll |
    8   …i16] [i16 0, i16 0, i16 0, i16 1280, i16 2560, i16 4864, i16 7168, i16 9472, i16 11776, i16 12672,…
    13  %WaterLeveldB_out = alloca i16, align 2
    18  …@fxpBitAllocation to i32 (i32, i32, i32, i32, i16*, i32, i32, i32)*)(i32 0, i32 0, i32 256, i32 %c…
    27  %WaterLeveldB.1p_vsel.lcssa = phi <4 x i16> [ %WaterLeveldB.1p_vsel, %polly.stmt.for.body ]
    28  …%_low_half = shufflevector <4 x i16> %WaterLeveldB.1p_vsel.lcssa, <4 x i16> undef, <2 x i32> <i32 …
    29  …%_high_half = shufflevector <4 x i16> %WaterLeveldB.1p_vsel.lcssa, <4 x i16> undef, <2 x i32> <i32…
    30  %0 = icmp sgt <2 x i16> %_low_half, %_high_half
    31  %1 = select <2 x i1> %0, <2 x i16> %_low_half, <2 x i16> %_high_half
    32  %2 = extractelement <2 x i16> %1, i32 0
    33  %3 = extractelement <2 x i16> %1, i32 1
    [all …]
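The matched lines trace a small horizontal-max reduction: a <4 x i16> value is split into low and high halves with shufflevector, the halves are compared lane-wise, and the winning lanes are extracted. A minimal sketch of that idiom as a standalone function (the function name, the shuffle masks the listing truncates, and the final scalar max are assumptions, not the original test's code):

define i16 @max_of_v4i16(<4 x i16> %v) {
  ; split the vector into two <2 x i16> halves (masks assumed from the _low_half/_high_half names)
  %low  = shufflevector <4 x i16> %v, <4 x i16> undef, <2 x i32> <i32 0, i32 1>
  %high = shufflevector <4 x i16> %v, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
  ; keep the larger lane of each pair
  %cmp  = icmp sgt <2 x i16> %low, %high
  %max2 = select <2 x i1> %cmp, <2 x i16> %low, <2 x i16> %high
  ; reduce the remaining two lanes to a scalar
  %e0   = extractelement <2 x i16> %max2, i32 0
  %e1   = extractelement <2 x i16> %max2, i32 1
  %cmp2 = icmp sgt i16 %e0, %e1
  %max  = select i1 %cmp2, i16 %e0, i16 %e1
  ret i16 %max
}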
|
/external/swiftshader/third_party/LLVM/test/CodeGen/CellSPU/ |
D | icmp16.ll |
    13  target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-…
    28  ; i16 integer comparisons:
    29  define i16 @icmp_eq_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind {
    31  %A = icmp eq i16 %arg1, %arg2
    32  %B = select i1 %A, i16 %val1, i16 %val2
    33  ret i16 %B
    36  define i1 @icmp_eq_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind {
    38  %A = icmp eq i16 %arg1, %arg2
    42  define i16 @icmp_eq_immed01_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind {
    44  %A = icmp eq i16 %arg1, 511
    [all …]
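Reassembled from the matched lines, the first of the i16 comparison tests is the basic compare-and-select pattern (the entry label and the CHECK lines that the listing skips are omitted):

define i16 @icmp_eq_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind {
  %A = icmp eq i16 %arg1, %arg2            ; 16-bit equality compare
  %B = select i1 %A, i16 %val1, i16 %val2  ; pick val1 when equal, val2 otherwise
  ret i16 %B
}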
|
/external/llvm/test/CodeGen/SystemZ/ |
D | vec-const-08.ll |
    6   define <8 x i16> @f1() {
    10  ret <8 x i16> <i16 257, i16 257, i16 257, i16 257,
    11  i16 257, i16 257, i16 257, i16 257>
    15  define <8 x i16> @f2() {
    19  ret <8 x i16> <i16 51657, i16 51657, i16 51657, i16 51657,
    20  i16 51657, i16 51657, i16 51657, i16 51657>
    24  define <8 x i16> @f3() {
    28  ret <8 x i16> <i16 -258, i16 -258, i16 -258, i16 -258,
    29  i16 -258, i16 -258, i16 -258, i16 -258>
    33  define <8 x i16> @f4() {
    [all …]
|
D | vec-const-14.ll |
    7   define <8 x i16> @f1() {
    11  ret <8 x i16> <i16 0, i16 32768, i16 0, i16 32768,
    12  i16 0, i16 32768, i16 0, i16 32768>
    16  define <8 x i16> @f2() {
    20  ret <8 x i16> <i16 1, i16 -1, i16 1, i16 -1,
    21  i16 1, i16 -1, i16 1, i16 -1>
    25  define <8 x i16> @f3() {
    29  ret <8 x i16> <i16 -2, i16 0, i16 -2, i16 0,
    30  i16 -2, i16 0, i16 -2, i16 0>
    34  define <8 x i16> @f4() {
    [all …]
|
D | vec-abs-02.ll |
    6   define <8 x i16> @f1(<8 x i16> %val) {
    10  %cmp = icmp slt <8 x i16> %val, zeroinitializer
    11  %neg = sub <8 x i16> zeroinitializer, %val
    12  %ret = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
    13  ret <8 x i16> %ret
    17  define <8 x i16> @f2(<8 x i16> %val) {
    21  %cmp = icmp sle <8 x i16> %val, zeroinitializer
    22  %neg = sub <8 x i16> zeroinitializer, %val
    23  %ret = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
    24  ret <8 x i16> %ret
    [all …]
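Both matched functions are the compare/negate/select absolute-value idiom on <8 x i16> (f1 uses slt, f2 uses sle). A self-contained sketch of f1, with the CHECK lines that the listing drops left out:

define <8 x i16> @f1(<8 x i16> %val) {
  %cmp = icmp slt <8 x i16> %val, zeroinitializer   ; which lanes are negative
  %neg = sub <8 x i16> zeroinitializer, %val        ; negated copy of every lane
  %ret = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
  ret <8 x i16> %ret
}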
|
D | vec-cmp-02.ll |
    6   define <8 x i16> @f1(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
    10  %cmp = icmp eq <8 x i16> %val1, %val2
    11  %ret = sext <8 x i1> %cmp to <8 x i16>
    12  ret <8 x i16> %ret
    16  define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
    21  %cmp = icmp ne <8 x i16> %val1, %val2
    22  %ret = sext <8 x i1> %cmp to <8 x i16>
    23  ret <8 x i16> %ret
    27  define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
    31  %cmp = icmp sgt <8 x i16> %val1, %val2
    [all …]
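Each function compares two <8 x i16> vectors and widens the <8 x i1> result back to <8 x i16> with sext, so every lane ends up all-ones or zero. Reassembled f1 (CHECK lines omitted):

define <8 x i16> @f1(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
  %cmp = icmp eq <8 x i16> %val1, %val2    ; <8 x i1> per-lane equality
  %ret = sext <8 x i1> %cmp to <8 x i16>   ; -1 in equal lanes, 0 elsewhere
  ret <8 x i16> %ret
}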
|
/external/llvm/test/CodeGen/MSP430/ |
D | mult-alt-generic-msp430.ll |
    3   target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
    6   @mout0 = common global i16 0, align 2
    7   @min1 = common global i16 0, align 2
    8   @marray = common global [2 x i16] zeroinitializer, align 2
    12  call void asm "foo $1,$0", "=*m,*m"(i16* @mout0, i16* @min1) nounwind
    18  %out0 = alloca i16, align 2
    19  %index = alloca i16, align 2
    20  store i16 0, i16* %out0, align 2
    21  store i16 1, i16* %index, align 2
    32  %out0 = alloca i16, align 2
    [all …]
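The match at line 12 is the memory-constrained inline-asm form these multi-alternative-constraint tests exercise: both operands are passed indirectly with "=*m"/"*m", so the asm reads and writes the i16 globals in place rather than through registers. A sketch wrapping that call in a function (the wrapper name is an assumption; the original file defines several such small wrappers):

@mout0 = common global i16 0, align 2
@min1 = common global i16 0, align 2

define void @single_m() nounwind {
entry:
  ; output $0 and input $1 are both memory operands, never registers
  call void asm "foo $1,$0", "=*m,*m"(i16* @mout0, i16* @min1) nounwind
  ret void
}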
|
D | postinc.ll |
    2   target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8"
    5   define zeroext i16 @add(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
    7   %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
    11  %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
    12  %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
    13  %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
    16  %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
    17  %add = add i16 %tmp4, %sum.09 ; <i16> [#uses=2]
    18  %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
    19  %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
    [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/MSP430/ |
D | mult-alt-generic-msp430.ll |
    3   target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
    6   @mout0 = common global i16 0, align 2
    7   @min1 = common global i16 0, align 2
    8   @marray = common global [2 x i16] zeroinitializer, align 2
    12  call void asm "foo $1,$0", "=*m,*m"(i16* @mout0, i16* @min1) nounwind
    18  %out0 = alloca i16, align 2
    19  %index = alloca i16, align 2
    20  store i16 0, i16* %out0, align 2
    21  store i16 1, i16* %index, align 2
    32  %out0 = alloca i16, align 2
    [all …]
|
/external/llvm/test/CodeGen/Mips/msa/ |
D | 3rf_4rf_q.ll |
    7   @llvm_mips_madd_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7…
    8   @llvm_mips_madd_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, …
    9   @llvm_mips_madd_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22…
    10  @llvm_mips_madd_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0…
    14  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG1
    15  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG2
    16  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG3
    17  %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
    18  store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
    22  declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
    [all …]
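The matches follow the standard MSA intrinsic test shape: load three <8 x i16> globals, feed them to the llvm.mips.madd.q.h intrinsic, and store the result. A self-contained sketch of that shape (the wrapper's name, the truncated tails of the initializers, and the alignments are assumptions continued from the visible pattern):

@llvm_mips_madd_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_madd_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_madd_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
@llvm_mips_madd_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_madd_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG3
  ; fixed-point (Q15) multiply-add of the three vector operands
  %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind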
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | byval4.ll |
    27  %struct.s = type { i16, i16, i16, i16, i16, i16, i16, i16,
    28  i16, i16, i16, i16, i16, i16, i16, i16,
    29  i16, i16, i16, i16, i16, i16, i16, i16,
    30  i16, i16, i16, i16, i16, i16, i16, i16,
    31  i16, i16, i16, i16, i16, i16, i16, i16,
    32  i16, i16, i16, i16, i16, i16, i16, i16,
    33  i16, i16, i16, i16, i16, i16, i16, i16,
    34  i16, i16, i16, i16, i16, i16, i16, i16,
    35  i16 }
    38  define void @g(i16 signext %a1, i16 signext %a2, i16 signext %a3,
    [all …]
|
D | pic-load-remat.ll |
    8   …3 = tail call <8 x i16> @llvm.x86.sse2.psubs.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializ…
    9   …3 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializ…
    10  …<8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 3, …
    11  …%tmp4651 = add <8 x i16> %tmp4609, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 > ; <…
    12  …ll <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp4651, <8 x i16> bitcast (<4 x i32> < i32 4, i32 …
    13  …i16> @llvm.x86.sse2.pavg.w( <8 x i16> < i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170…
    14  …%tmp4679 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4669, <8 x i16> %tmp4669 ) no…
    15  %tmp4689 = add <8 x i16> %tmp4679, %tmp4658 ; <<8 x i16>> [#uses=1]
    16  …4700 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4689, <8 x i16> zeroinitializer )…
    17  %tmp4708 = bitcast <8 x i16> %tmp4700 to <2 x i64> ; <<2 x i64>> [#uses=1]
    [all …]
|
D | rot16.ll |
    3   define i16 @foo(i16 %x, i16 %y, i16 %z) nounwind readnone {
    7   %0 = shl i16 %x, %z
    8   %1 = sub i16 16, %z
    9   %2 = lshr i16 %x, %1
    10  %3 = or i16 %2, %0
    11  ret i16 %3
    14  define i16 @bar(i16 %x, i16 %y, i16 %z) nounwind readnone {
    18  %0 = shl i16 %y, %z
    19  %1 = sub i16 16, %z
    20  %2 = lshr i16 %x, %1
    [all …]
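@foo is the variable-amount 16-bit rotate-left idiom: shift left by %z, shift right by 16 - %z, then OR the halves, which the backend is expected to recognize as a single rotate; @bar does the same with two different inputs, i.e. a funnel shift. Reassembled @foo (CHECK lines omitted; like the original pattern, it relies on the rotate amount being nonzero, since a shift by 16 on i16 is undefined):

define i16 @foo(i16 %x, i16 %y, i16 %z) nounwind readnone {
entry:
  %0 = shl i16 %x, %z    ; bits moved toward the high end
  %1 = sub i16 16, %z    ; complementary shift amount
  %2 = lshr i16 %x, %1   ; bits wrapped around to the low end
  %3 = or i16 %2, %0     ; rol16(%x, %z)
  ret i16 %3
}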
|
/external/llvm/test/CodeGen/X86/ |
D | pic-load-remat.ll |
    8   …3 = tail call <8 x i16> @llvm.x86.sse2.psubs.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializ…
    9   …3 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializ…
    10  …<8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 3, …
    11  …%tmp4651 = add <8 x i16> %tmp4609, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 > ; <…
    12  …ll <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp4651, <8 x i16> bitcast (<4 x i32> < i32 4, i32 …
    13  …i16> @llvm.x86.sse2.pavg.w( <8 x i16> < i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170…
    14  …%tmp4679 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4669, <8 x i16> %tmp4669 ) no…
    15  %tmp4689 = add <8 x i16> %tmp4679, %tmp4658 ; <<8 x i16>> [#uses=1]
    16  …4700 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4689, <8 x i16> zeroinitializer )…
    17  %tmp4708 = bitcast <8 x i16> %tmp4700 to <2 x i64> ; <<2 x i64>> [#uses=1]
    [all …]
|
D | byval4.ll |
    27  %struct.s = type { i16, i16, i16, i16, i16, i16, i16, i16,
    28  i16, i16, i16, i16, i16, i16, i16, i16,
    29  i16, i16, i16, i16, i16, i16, i16, i16,
    30  i16, i16, i16, i16, i16, i16, i16, i16,
    31  i16, i16, i16, i16, i16, i16, i16, i16,
    32  i16, i16, i16, i16, i16, i16, i16, i16,
    33  i16, i16, i16, i16, i16, i16, i16, i16,
    34  i16, i16, i16, i16, i16, i16, i16, i16,
    35  i16 }
    38  define void @g(i16 signext %a1, i16 signext %a2, i16 signext %a3,
    [all …]
|
D | rot16.ll |
    3   define i16 @foo(i16 %x, i16 %y, i16 %z) nounwind readnone {
    7   %0 = shl i16 %x, %z
    8   %1 = sub i16 16, %z
    9   %2 = lshr i16 %x, %1
    10  %3 = or i16 %2, %0
    11  ret i16 %3
    14  define i16 @bar(i16 %x, i16 %y, i16 %z) nounwind readnone {
    18  %0 = shl i16 %y, %z
    19  %1 = sub i16 16, %z
    20  %2 = lshr i16 %x, %1
    [all …]
|
/external/llvm/test/Transforms/BBVectorize/ |
D | vector-sel.ll |
    5   @d = external global [1 x [10 x [1 x i16]]], align 16
    8   ;CHECK: %0 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3…
    9   ;CHECK: %1 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3…
    10  ;CHECK: %2 = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4…
    12  …CHECK: %4 = select <8 x i1> %3, <8 x i16> <i16 -3, i16 -3, i16 -3, i16 -3, i16 -3, i16 -3, i16 -3,…
    20  …%0 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -…
    21  …%1 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -…
    22  %2 = select <4 x i1> %boolvec, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>, <4 x i16> %0
    23  %3 = select <4 x i1> %boolvec, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>, <4 x i16> %1
    24  %4 = add nsw <4 x i16> %2, zeroinitializer
    [all …]
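The input (lines 20-24) runs two parallel <4 x i16> select chains; the CHECK lines expect BBVectorize to concatenate them with a shufflevector and continue in <8 x i16>. A sketch of just that widening step, wrapped in a hypothetical function so it stands alone (the trailing shuffle-mask lanes that the listing truncates are assumed to continue 4 through 7):

define <8 x i16> @widen_selects(i1 %bool) {
  ; two identical scalar-condition selects over <4 x i16> constants
  %s0 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>
  %s1 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>
  ; concatenated into one <8 x i16> value so later operations can be vectorized once
  %wide = shufflevector <4 x i16> %s0, <4 x i16> %s1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %wide
}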
|
/external/llvm/test/Transforms/SLPVectorizer/AArch64/ |
D | gather-reduce.ll |
    24  ; GENERIC: [[L:%[a-zA-Z0-9.]+]] = load <8 x i16>
    25  ; GENERIC: zext <8 x i16> [[L]] to <8 x i32>
    30  define i32 @gather_reduce_8x16_i32(i16* nocapture readonly %a, i16* nocapture readonly %b, i16* noc…
    48  %a.addr.0101 = phi i16* [ %incdec.ptr58, %for.body ], [ %a, %for.body.preheader ]
    49  %incdec.ptr = getelementptr inbounds i16, i16* %a.addr.0101, i64 1
    50  %0 = load i16, i16* %a.addr.0101, align 2
    51  %conv = zext i16 %0 to i32
    52  %incdec.ptr1 = getelementptr inbounds i16, i16* %b, i64 1
    53  %1 = load i16, i16* %b, align 2
    54  %conv2 = zext i16 %1 to i32
    [all …]
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bswap.ll |
    12   @src16 = common global [16 x i16] zeroinitializer, align 32
    13   @dst16 = common global [16 x i16] zeroinitializer, align 32
    17   declare i16 @llvm.bswap.i16(i16)
    148  ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>…
    149  ; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> [[TMP1]])
    150  ; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), a…
    153  …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
    154  …%ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align…
    155  …%ld2 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align…
    156  …%ld3 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align…
    [all …]
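The scalar input loads @src16 one i16 at a time and calls llvm.bswap.i16 on each element; the CHECK-NEXT lines expect the SLP vectorizer to collapse that into one <8 x i16> load, one llvm.bswap.v8i16 call, and one store. A sketch of the expected vectorized form (the wrapper name and the alignment of 2, which the listing truncates, are assumptions):

@src16 = common global [16 x i16] zeroinitializer, align 32
@dst16 = common global [16 x i16] zeroinitializer, align 32

declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)

define void @bswap_8xi16() {
  %v = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
  %swapped = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %v)   ; byte-swap every i16 lane at once
  store <8 x i16> %swapped, <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
  ret void
}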
|
/external/llvm/test/CodeGen/AArch64/ |
D | aarch64-be-bv.ll |
    3   @vec_v8i16 = global <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
    6   define i16 @movi_modimm_t1() nounwind {
    11  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
    12  %rv = add <8 x i16> %in, <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>
    13  %el = extractelement <8 x i16> %rv, i32 0
    14  ret i16 %el
    18  define i16 @movi_modimm_t2() nounwind {
    23  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
    24  %rv = add <8 x i16> %in, <i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0>
    25  %el = extractelement <8 x i16> %rv, i32 0
    [all …]
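Each test loads the global vector, adds a constant whose repeating lane pattern is a candidate for a MOVI-style modified immediate, and returns lane 0, presumably so the big-endian lane ordering is observable in the result. Reassembled movi_modimm_t1 (the CHECK lines between the define and the load are elided by the listing):

@vec_v8i16 = global <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>

define i16 @movi_modimm_t1() nounwind {
  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
  ; constant with a repeating <1, 0> lane pattern, materializable as a modified immediate
  %rv = add <8 x i16> %in, <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>
  %el = extractelement <8 x i16> %rv, i32 0
  ret i16 %el
}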
|
/external/llvm/test/Instrumentation/DataFlowSanitizer/ |
D | store.ll |
    3   target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v…
    21  ; NO_COMBINE_PTR_LABEL: load i16, i16* {{.*}} @__dfsan_arg_tls
    25  ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
    26  ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
    27  ; NO_COMBINE_PTR_LABEL: store i16
    31  ; COMBINE_PTR_LABEL: load i16, i16*
    32  ; COMBINE_PTR_LABEL: load i16, i16*
    33  ; COMBINE_PTR_LABEL: icmp ne i16
    38  ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
    39  ; COMBINE_PTR_LABEL: getelementptr i16, i16*
    [all …]
|
/external/llvm/test/CodeGen/BPF/ |
D | loops.ll |
    3   define zeroext i16 @add(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
    5   %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
    9   %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
    10  %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
    11  %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
    14  %tmp4 = load i16, i16* %arrayidx ; <i16> [#uses=1]
    15  %add = add i16 %tmp4, %sum.09 ; <i16> [#uses=2]
    16  %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
    17  %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
    21  %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
    [all …]
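This is the same i16 array-summation loop that appears in the MSP430 postinc.ll entry above: a guarded counted loop whose phis carry the induction variable and the running sum. A sketch that fills in the branches and return the listing skips (the block labels and branch targets are assumptions consistent with the visible phi edges):

define zeroext i16 @add(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
entry:
  %cmp8 = icmp eq i16 %n, 0
  br i1 %cmp8, label %for.end, label %for.body   ; skip the loop entirely when n == 0

for.body:
  %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ]
  %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr i16, i16* %a, i16 %i.010
  %tmp4 = load i16, i16* %arrayidx
  %add = add i16 %tmp4, %sum.09                  ; sum += a[i]
  %inc = add i16 %i.010, 1
  %exitcond = icmp eq i16 %inc, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ]
  ret i16 %sum.0.lcssa
}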
|
/external/llvm/test/Transforms/InstCombine/ |
D | 2012-04-23-Neon-Intrinsics.ll |
    3   define <4 x i32> @mulByZero(<4 x i16> %x) nounwind readnone ssp {
    5   …%a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> zeroinitializer) noun…
    11  define <4 x i32> @mulByOne(<4 x i16> %x) nounwind readnone ssp {
    13  … tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1, i16…
    16  ; CHECK-NEXT: %a = sext <4 x i16> %x to <4 x i32>
    22  … x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 …
    30  … i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i1…
    38  … i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i1…
    44  define <4 x i32> @complex1(<4 x i16> %x) nounwind readnone ssp {
    46  …= tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16…
    [all …]
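These tests check that instcombine simplifies NEON widening multiplies with constant operands; for example, the CHECK-NEXT at line 16 expects a multiply by a splat of 1 to fold to a plain sign extension. A sketch of mulByOne reassembled from the matched lines (the entry label and the ret are filled in as assumptions):

declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone

define <4 x i32> @mulByOne(<4 x i16> %x) nounwind readnone ssp {
entry:
  ; signed widening multiply by <1, 1, 1, 1>; instcombine should rewrite this
  ; to:  %a = sext <4 x i16> %x to <4 x i32>
  %a = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
  ret <4 x i32> %a
}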
|
/external/llvm/test/Analysis/ScalarEvolution/ |
D | 2008-08-04-LongAddRec.ll |
    10  add i16 %x17.0, 1 ; <i16>:0 [#uses=2]
    11  add i16 %0, %x16.0 ; <i16>:1 [#uses=2]
    12  add i16 %1, %x15.0 ; <i16>:2 [#uses=2]
    13  add i16 %2, %x14.0 ; <i16>:3 [#uses=2]
    14  add i16 %3, %x13.0 ; <i16>:4 [#uses=2]
    15  add i16 %4, %x12.0 ; <i16>:5 [#uses=2]
    16  add i16 %5, %x11.0 ; <i16>:6 [#uses=2]
    17  add i16 %6, %x10.0 ; <i16>:7 [#uses=2]
    18  add i16 %7, %x9.0 ; <i16>:8 [#uses=2]
    19  add i16 %8, %x8.0 ; <i16>:9 [#uses=2]
    [all …]
|
/external/swiftshader/third_party/LLVM/test/Analysis/ScalarEvolution/ |
D | 2008-08-04-LongAddRec.ll |
    10  add i16 %x17.0, 1 ; <i16>:0 [#uses=2]
    11  add i16 %0, %x16.0 ; <i16>:1 [#uses=2]
    12  add i16 %1, %x15.0 ; <i16>:2 [#uses=2]
    13  add i16 %2, %x14.0 ; <i16>:3 [#uses=2]
    14  add i16 %3, %x13.0 ; <i16>:4 [#uses=2]
    15  add i16 %4, %x12.0 ; <i16>:5 [#uses=2]
    16  add i16 %5, %x11.0 ; <i16>:6 [#uses=2]
    17  add i16 %6, %x10.0 ; <i16>:7 [#uses=2]
    18  add i16 %7, %x9.0 ; <i16>:8 [#uses=2]
    19  add i16 %8, %x8.0 ; <i16>:9 [#uses=2]
    [all …]
|