/external/llvm/test/CodeGen/X86/
narrow-shl-cst.ll
    5:  %and = shl i32 %x, 10
    6:  %shl = and i32 %and, 31744
    7:  ret i32 %shl
    14: %or = shl i32 %x, 10
    15: %shl = or i32 %or, 31744
    16: ret i32 %shl
    23: %xor = shl i32 %x, 10
    24: %shl = xor i32 %xor, 31744
    25: ret i32 %shl
    32: %and = shl i64 %x, 40
    [all …]
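The constants make the fold visible: 31744 is 0x7C00, a mask of bits 10 through 14, so (x << 10) & 0x7C00 keeps only the low five bits of x and equals (x & 31) << 10. Presumably that narrowing of the and-constant (31 instead of 31744) is what the test name refers to. A minimal sketch, with an illustrative function name:

    define i32 @shl_then_mask(i32 %x) {
      %and = shl i32 %x, 10
      %shl = and i32 %and, 31744      ; 31744 = 0x7C00 = bits 10..14
      ret i32 %shl
    }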
dagcombine-shifts.ll
    3:  ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
    5:  ; Canonicalize the sequence shl/zext/lshr performing the zeroextend
    16: %shl = shl i16 %ext, 4
    17: ret i16 %shl
    23: ; CHECK-NOT: shl
    30: %shl = shl i32 %ext, 4
    31: ret i32 %shl
    37: ; CHECK-NOT: shl
    44: %shl = shl i32 %ext, 4
    45: ret i32 %shl
    [all …]
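The fold quoted in the header moves the zext outward so that the lshr/shl pair operates in the narrow type. A minimal sketch of the pattern, with an i8 source chosen for illustration (not the test's exact function):

    define i32 @zext_lshr_shl(i8 %a) {
      %lshr = lshr i8 %a, 4
      %ext = zext i8 %lshr to i32
      %shl = shl i32 %ext, 4          ; same amount as the lshr above
      ret i32 %shl
    }

After the rewrite, shl (lshr %a, 4), 4 in i8 is just %a & 0xF0, so no shift survives, which is what the CHECK-NOT: shl lines verify.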
shift-bmi2.ll
    6:  %shl = shl i32 %x, %shamt
    13: ret i32 %shl
    18: %shl = shl i32 %x, 5
    25: ret i32 %shl
    31: %shl = shl i32 %x, %shamt
    39: ret i32 %shl
    45: %shl = shl i32 %x, 5
    52: ret i32 %shl
    57: %shl = shl i64 %x, %shamt
    61: ret i64 %shl
    [all …]
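The name points at BMI2: with that feature enabled, a variable shift can select shlx, which takes the shift amount in any general register instead of %cl. A hedged reduced case; the RUN line is an assumption, not copied from the test:

    ; RUN: llc < %s -march=x86-64 -mattr=+bmi2 | FileCheck %s
    define i32 @shl32_var(i32 %x, i32 %shamt) {
      %shl = shl i32 %x, %shamt       ; with BMI2 this can select shlx
      ret i32 %shl
    }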
sse2-vector-shifts.ll
    7:  %shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
    8:  ret <8 x i16> %shl
    17: %shl = shl <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
    18: ret <8 x i16> %shl
    27: %shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
    28: ret <8 x i16> %shl
    37: %shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
    38: ret <4 x i32> %shl
    47: %shl = shl <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
    48: ret <4 x i32> %shl
    [all …]
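Every lane shifts by the same constant, which is the cheap SSE2 case: a uniform <8 x i16> shift maps to one psllw and a uniform <4 x i32> shift to one pslld, a shift by zero should fold away entirely, and a shift by one may instead become an add, since x << 1 == x + x. A sketch of the simplest shape, function name illustrative:

    define <4 x i32> @shl_uniform_by_one(<4 x i32> %v) {
      %shl = shl <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>   ; one pslld, or a paddd
      ret <4 x i32> %shl
    }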
avx2-vector-shifts.ll
    7:  …%shl = shl <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 …
    8:  ret <16 x i16> %shl
    17: …%shl = shl <16 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 …
    18: ret <16 x i16> %shl
    27: …%shl = shl <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16…
    28: ret <16 x i16> %shl
    37: %shl = shl <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
    38: ret <8 x i32> %shl
    47: %shl = shl <8 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    48: ret <8 x i32> %shl
    [all …]
shift-combine-crash.ll
    15: %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 1, i64 2>
    16: ret <4 x i64> %shl
    23: %shl = shl <4 x i64> %A, <i64 2, i64 3, i64 undef, i64 undef>
    24: ret <4 x i64> %shl
    28: %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 3, i64 undef>
    29: ret <4 x i64> %shl
    33: %shl = shl <4 x i64> %A, <i64 undef, i64 2, i64 undef, i64 3>
    34: ret <4 x i64> %shl
    38: %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 undef, i64 undef>
    39: ret <4 x i64> %shl
    [all …]
rotate4.ll
    12: %shl = shl i32 %a, %and
    16: %or = or i32 %shl, %shr
    26: %shl = lshr i32 %a, %and
    29: %shr = shl i32 %a, %and3
    30: %or = or i32 %shl, %shr
    40: %shl = shl i64 %a, %and
    44: %or = or i64 %shl, %shr
    54: %shl = lshr i64 %a, %and
    57: %shr = shl i64 %a, %and3
    58: %or = or i64 %shl, %shr
    [all …]
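These are the standard UB-free rotate idioms: mask the amount, shift one way by it, shift the other way by the masked complement, and or the halves together. A sketch of the 32-bit rotate-left shape, function name illustrative:

    define i32 @rotl32(i32 %a, i32 %b) {
      %and = and i32 %b, 31           ; amount mod 32
      %shl = shl i32 %a, %and
      %sub = sub i32 32, %and
      %and3 = and i32 %sub, 31        ; masked complement, so 0 stays 0
      %shr = lshr i32 %a, %and3
      %or = or i32 %shl, %shr
      ret i32 %or
    }

A backend that recognizes the pattern can select a single rotate instruction (rol on x86).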
vshift-1.ll
    10: %shl = shl <2 x i64> %val, < i64 32, i64 32 >
    11: store <2 x i64> %shl, <2 x i64>* %dst
    22: %shl = shl <2 x i64> %val, %1
    23: store <2 x i64> %shl, <2 x i64>* %dst
    32: %shl = shl <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
    33: store <4 x i32> %shl, <4 x i32>* %dst
    46: %shl = shl <4 x i32> %val, %3
    47: store <4 x i32> %shl, <4 x i32>* %dst
    55: %shl = shl <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
    56: store <8 x i16> %shl, <8 x i16>* %dst
    [all …]
vec_shift6.ll
    11: %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
    12: ret <8 x i16> %shl
    20: %shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
    21: ret <8 x i16> %shl
    33: %shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
    34: ret <4 x i32> %shl
    44: %shl = shl <4 x i32> %a, <i32 0, i32 0, i32 1, i32 1>
    45: ret <4 x i32> %shl
    59: …%shl = shl <16 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, …
    60: ret <16 x i16> %shl
    [all …]
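With distinct constant amounts per lane, a left shift can be rewritten as a multiply, since x << c == x * (1 << c) in each lane; on SSE/AVX that turns the shift into one pmullw or pmulld against a constant vector. For the first case above, the equivalent multiplier vector would be <2, 2, 4, 8, 128, 1, 512, 2048>. A self-contained sketch:

    define <8 x i16> @shl_consts_as_mul(<8 x i16> %a) {
      ; shifting by <1,1,2,3,7,0,9,11> multiplies by <2,2,4,8,128,1,512,2048>
      %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
      ret <8 x i16> %shl
    }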
vshift-4.ll
    11: %shl = shl <2 x i64> %val, %shamt
    12: store <2 x i64> %shl, <2 x i64>* %dst
    27: %shl = shl <2 x i64> %val, %shamt
    28: store <2 x i64> %shl, <2 x i64>* %dst
    37: %shl = shl <4 x i32> %val, %shamt
    38: store <4 x i32> %shl, <4 x i32>* %dst
    47: %shl = shl <4 x i32> %val, %shamt
    48: store <4 x i32> %shl, <4 x i32>* %dst
    57: %shl = shl <4 x i32> %val, %shamt
    58: store <4 x i32> %shl, <4 x i32>* %dst
    [all …]
shl_undef.ll
    6:  ; the successor shl(s) become shl undef, 1. This pattern then matches
    7:  ; shl x, 1 -> add x, x. add undef, undef doesn't guarantee the low
    13: ; Use intel syntax, or "shl" might hit "pushl".
    15: ; CHECK-NOT: shl
    26: %tmp1506 = shl i32 %tmp1220, 1
    33: %tmp1618 = shl i32 %tmp1676, 1
    40: ; CHECK-NOT: shl
    41: ; shl undef, 0 -> undef
    44: %tmp2 = shl i32 undef, 0;
    48: ; CHECK-NOT: shl
    [all …]
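The subtlety the header describes: shl %x, 1 always produces a value whose low bit is zero, so rewriting it to add %x, %x is only sound for a concrete %x. Once %x is undef, each undef operand of the add may be chosen independently, and the low bit is no longer guaranteed. A sketch of the two sides:

    define i32 @shl_undef_by_1() {
      %a = shl i32 undef, 1           ; low bit is provably 0
      ret i32 %a
    }
    ; rewriting %a to "add i32 undef, undef" would be wrong: each undef can be
    ; picked independently, so the sum's low bit is not guaranteed to be 0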
add_shl_constant.ll
    8:  %shl = shl i32 %add.0, 3
    9:  %add.1 = add i32 %shl, %y
    18: %shl = shl i32 %add.0, 3
    19: %add.1 = add i32 %y, %shl
    32: %shl = shl <4 x i32> %add.0, <i32 3, i32 3, i32 3, i32 3>
    33: %add.1 = add <4 x i32> %shl, %y
    46: %shl = shl <4 x i32> %add.0, <i32 3, i32 3, i32 3, i32 3>
    47: %add.1 = add <4 x i32> %y, %shl
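The arithmetic behind the fold is reassociation: ((x + c) << 3) + y equals (x << 3) + y + (c << 3), so the constant can migrate out of the shifted sum. With a hypothetical c = 4 (not taken from the test):

    define i32 @add_shl_add(i32 %x, i32 %y) {
      %add.0 = add i32 %x, 4          ; hypothetical constant c = 4
      %shl = shl i32 %add.0, 3        ; ((x + 4) << 3) == (x << 3) + 32
      %add.1 = add i32 %shl, %y
      ret i32 %add.1
    }

On x86 that shape can fit a single lea with scale 8 and displacement 32.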
legalize-shift-64.ll
    6:  %shl = shl i64 %conv, %sh_prom
    7:  ret i64 %shl
    18: %shl = shl i64 %xx, %sh_prom
    19: ret i64 %shl
    59: %shl = shl <2 x i64> %A, %B
    60: ret <2 x i64> %shl
    62: ; CHECK: shl
    64: ; CHECK: shl
    75: %shl = shl i32 %load, 8
    76: %add = add i32 %shl, -224
    [all …]
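On a 32-bit target an i64 shift is not legal, so the legalizer splits the value into two 32-bit halves and emits a shl/shld pair plus a fixup for amounts of 32 or more; the repeated CHECK: shl lines above reflect the per-half expansion. A minimal case, assuming an i686-style triple:

    define i64 @shl64(i64 %x, i64 %n) {
      %shl = shl i64 %x, %n           ; expanded to shl + shld (+ amount >= 32 fixup)
      ret i64 %shl
    }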
x86-64-double-precision-shift-left.ll
    19: %shl = shl i64 %a, 1
    21: %or = or i64 %shr, %shl
    37: %shl = shl i64 %a, 2
    39: %or = or i64 %shr, %shl
    55: %shl = shl i64 %a, 7
    57: %or = or i64 %shr, %shl
    73: %shl = shl i64 %a, 63
    75: %or = or i64 %shr, %shl
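Each function hand-builds the double-precision-shift shape: one value shifted left by n, or-ed with another shifted right by 64 - n, which is exactly what x86's shld computes. A sketch for n = 1, function name illustrative:

    define i64 @shld_by_1(i64 %a, i64 %b) {
      %shl = shl i64 %a, 1
      %shr = lshr i64 %b, 63          ; 64 - 1
      %or = or i64 %shr, %shl         ; the shape shld $1 computes
      ret i64 %or
    }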
/external/llvm/test/Analysis/CostModel/X86/
vshift-cost.ll
    13: %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
    14: ret <8 x i16> %shl
    17: ; CHECK: Found an estimated cost of 1 for instruction: %shl
    21: %shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
    22: ret <8 x i16> %shl
    25: ; CHECK: Found an estimated cost of 1 for instruction: %shl
    34: %shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
    35: ret <4 x i32> %shl
    38: ; SSE2: Found an estimated cost of 6 for instruction: %shl
    39: ; SSE41: Found an estimated cost of 1 for instruction: %shl
    [all …]
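The "Found an estimated cost" lines are the output of the cost-model analysis printer; for LLVM of this vintage the checks would be driven by a RUN line along these lines (an assumption, not copied from the file):

    ; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s

The SSE2/SSE41 split in the last two lines records that a non-uniform constant <4 x i32> shift is scalarized on SSE2 (cost 6) but can be a single pmulld on SSE4.1 (cost 1).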
testshiftshl.ll
    8:  ; SSE2: cost of 20 {{.*}} shl
    12: %0 = shl %shifttype %a , %b
    20: ; SSE2: cost of 10 {{.*}} shl
    24: %0 = shl %shifttype4i16 %a , %b
    32: ; SSE2: cost of 80 {{.*}} shl
    36: %0 = shl %shifttype8i16 %a , %b
    44: ; SSE2: cost of 160 {{.*}} shl
    48: %0 = shl %shifttype16i16 %a , %b
    56: ; SSE2: cost of 320 {{.*}} shl
    60: %0 = shl %shifttype32i16 %a , %b
    [all …]
/external/llvm/lib/Target/R600/
SIRegisterInfo.td
    75: (add (decimate (shl SGPR_32, 1), 2))]>;
    80: (add (decimate (shl SGPR_32, 1), 4)),
    81: (add (decimate (shl SGPR_32, 2), 4)),
    82: (add (decimate (shl SGPR_32, 3), 4))]>;
    87: (add (decimate (shl SGPR_32, 1), 4)),
    88: (add (decimate (shl SGPR_32, 2), 4)),
    89: (add (decimate (shl SGPR_32, 3), 4)),
    90: (add (decimate (shl SGPR_32, 4), 4)),
    91: (add (decimate (shl SGPR_32, 5), 4)),
    92: (add (decimate (shl SGPR_32, 6), 4)),
    [all …]
/external/llvm/test/CodeGen/Mips/
mips64shift.ll
    6:  %shl = shl i64 %a0, %a1
    7:  ret i64 %shl
    27: %shl = shl i64 %a0, 10
    28: ret i64 %shl
    48: %shl = shl i64 %a0, 40
    49: ret i64 %shl
    71: %shl = shl i64 %a0, %sub
    72: %or = or i64 %shl, %shr
    79: %shl = shl i64 %a0, %a1
    82: %or = or i64 %shr, %shl
    [all …]
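The two constant cases probe MIPS64's split shift encodings: amounts below 32 use dsll, while amounts of 32 through 63 use dsll32, which encodes amount - 32 in its 5-bit field. A hedged sketch of the second case:

    define i64 @shl_by_40(i64 %a0) {
      %shl = shl i64 %a0, 40          ; expected to select dsll32 with field value 8
      ret i64 %shl
    }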
rotate.ll
    8:  %shl = shl i32 %a, %b
    11: %or = or i32 %shr, %shl
    19: %shl = shl i32 %a, 10
    21: %or = or i32 %shl, %shr
    31: %shl = shl i32 %a, %sub
    32: %or = or i32 %shl, %shr
    41: %shl = shl i32 %a, 22
    42: %or = or i32 %shr, %shl
/external/llvm/test/Transforms/InstCombine/
shift.ll
    8:   %B = shl i32 %A, 0 ; <i32> [#uses=1]
    16:  %B = shl i32 0, %shift.upgrd.1 ; <i32> [#uses=1]
    67:  %B = shl i32 %A, 32 ;; shift all bits out
    74:  %B = shl <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
    81:  %B = shl <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
    103: %B = shl i32 %A, 1 ;; convert to an mul instruction
    113: %C = shl i32 %B, 1 ;; convert to an mul instruction
    129: %B = shl i8 %A, 5 ; <i8> [#uses=1]
    130: %C = shl i8 %B, 3 ; <i8> [#uses=1]
    139: %B = shl i8 %A, 7 ; <i8> [#uses=1]
    [all …]
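Several of these are bit-width edge cases. Two consecutive constant shifts combine by adding the amounts, and once the total reaches the bit width every bit has been shifted out, so the pair at lines 129 and 130 (shl i8 by 5, then by 3) can fold to zero:

    define i8 @two_shls_fold_to_zero(i8 %A) {
      %B = shl i8 %A, 5
      %C = shl i8 %B, 3               ; 5 + 3 = 8 >= bit width: result is 0
      ret i8 %C
    }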
/external/llvm/test/CodeGen/R600/
sext-in-reg.ll
    18: %shl = shl i32 %in, 31
    19: %sext = ashr i32 %shl, 31
    36: %shl = shl i32 %c, 24
    37: %ashr = ashr i32 %shl, 24
    54: %shl = shl i32 %c, 16
    55: %ashr = ashr i32 %shl, 16
    72: %shl = shl <1 x i32> %c, <i32 24>
    73: %ashr = ashr <1 x i32> %shl, <i32 24>
    85: %c = shl i64 %a, %b
    86: %shl = shl i64 %c, 63
    [all …]
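The shl/ashr pairs are the canonical sign_extend_inreg pattern: shifting left by 24 and then arithmetic-shifting right by 24 sign-extends the low byte in place. Worked example: for %c = 255 (0x000000FF), the shl gives 0xFF000000 and the ashr gives 0xFFFFFFFF, i.e. -1. Self-contained sketch:

    define i32 @sext_low_byte(i32 %c) {
      %shl = shl i32 %c, 24
      %ashr = ashr i32 %shl, 24       ; sign-extend bits 0..7 in place
      ret i32 %ashr
    }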
/external/llvm/test/CodeGen/ARM/
rev.ll
    8:  %tmp4 = shl i32 %X15, 8
    26: %tmp4 = shl i16 %tmp3, 8
    51: %shl = shl nuw nsw i32 %conv, 8
    52: %or = or i32 %conv2, %shl
    53: %sext = shl i32 %or, 16
    63: %shl = shl i32 %i, 24
    64: %shr = ashr exact i32 %shl, 16
    76: %and = shl i32 %x, 8
    77: %shl = and i32 %and, 65280
    84: %or10 = or i32 %or6, %shl
    [all …]
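The masks spell out byte moves: 65280 is 0xFF00, so (x << 8) & 0xFF00 relocates the low byte into byte 1. Idioms in this family are what the ARM backend matches to rev and rev16. One illustrative shape, not taken from the test:

    define i32 @swap_low_two_bytes(i32 %x) {
      %hi = shl i32 %x, 8
      %hi.m = and i32 %hi, 65280      ; 0xFF00: low byte moved up
      %lo = lshr i32 %x, 8
      %lo.m = and i32 %lo, 255        ; 0x00FF: byte 1 moved down
      %or = or i32 %hi.m, %lo.m
      ret i32 %or
    }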
/external/llvm/test/CodeGen/AArch64/
arm64-shifted-sext.ll
    12:  %shl = shl nsw i32 %conv1, 4
    13:  %conv2 = trunc i32 %shl to i16
    36:  %shl = shl nsw i32 %conv1, 8
    37:  %conv2 = trunc i32 %shl to i16
    61:  %shl = shl nsw i32 %conv, 4
    62:  ret i32 %shl
    83:  %shl = shl nsw i32 %conv, 8
    84:  ret i32 %shl
    106: %shl = shl nsw i64 %conv, 4
    107: ret i64 %shl
    [all …]
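The interesting part is what surrounds each shl: a sign extension followed by a left shift, which AArch64 can merge into a single signed-bitfield-insert-in-zero (sbfiz), e.g. sbfiz w0, w0, #4, #8 for an i8 source shifted by 4. An assumed reduction, not the test's exact function:

    define i32 @sext_then_shl(i8 %v) {
      %conv = sext i8 %v to i32
      %shl = shl nsw i32 %conv, 4     ; candidate for a single sbfiz
      ret i32 %shl
    }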
/external/llvm/test/CodeGen/SystemZ/
int-neg-02.ll
    98:  %shl = shl i64 %val, 32
    99:  %ashr = ashr i64 %shl, 32
    101: %cmp = icmp slt i64 %shl, 0
    112: %shl = shl i64 %val, 32
    113: %ashr = ashr i64 %shl, 32
    115: %cmp = icmp sle i64 %shl, 0
    126: %shl = shl i64 %val, 32
    127: %ashr = ashr i64 %shl, 32
    129: %cmp = icmp sgt i64 %shl, 0
    140: %shl = shl i64 %val, 32
    [all …]
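Here shl by 32 followed by ashr by 32 sign-extends the low 32 bits of an i64 in place, and the comparisons are done on %shl rather than %ashr: the arithmetic shift preserves the sign bit, so a sign test on either value is equivalent. A reduced sketch of the sign test:

    define i1 @sign_of_low32(i64 %val) {
      %shl = shl i64 %val, 32
      %cmp = icmp slt i64 %shl, 0     ; true iff bit 31 of %val is set
      ret i1 %cmp
    }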
risbg-01.ll
    96:  %shr = shl i32 %foo, 2
    106: %shr = shl i64 %foo, 2
    118: %shr = shl i32 %foo, 2
    129: %shr = shl i64 %foo, 2
    136: ; shl are not used.
    141: %parta = shl i32 %foo, 14
    153: %parta = shl i64 %foo, 14
    160: ; Try a case in which only the bits from the shl are used.
    165: %parta = shl i32 %foo, 14
    177: %parta = shl i64 %foo, 14
    [all …]
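These shifts feed SystemZ's risbg (rotate then insert selected bits), which can perform a shift plus a contiguous mask in one instruction; the file's comments distinguish cases by which input bits actually survive. A hedged reduced case, not taken from the test:

    define i64 @risbg_candidate(i64 %foo) {
      %shift = shl i64 %foo, 2
      %and = and i64 %shift, 252      ; 252 = 0xFC: contiguous bits 2..7 after the shift
      ret i64 %and
    }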