/external/llvm/test/Analysis/CostModel/X86/ |
D | testshiftlshr.ll |
      8  ; SSE2: cost of 4 {{.*}} lshr
     12  %0 = lshr %shifttype %a , %b
     20  ; SSE2: cost of 16 {{.*}} lshr
     24  %0 = lshr %shifttype4i16 %a , %b
     32  ; SSE2: cost of 32 {{.*}} lshr
     36  %0 = lshr %shifttype8i16 %a , %b
     44  ; SSE2: cost of 64 {{.*}} lshr
     48  %0 = lshr %shifttype16i16 %a , %b
     56  ; SSE2: cost of 128 {{.*}} lshr
     60  %0 = lshr %shifttype32i16 %a , %b
     [all …]
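Each of these cost-model tests pairs a FileCheck line with the lshr it measures. A minimal sketch of one such case, with the cost value taken from the lines above; the RUN line and triple are reconstructed from the 3.x-era harness, not copied from the file:

    ; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 \
    ; RUN:   | FileCheck %s --check-prefix=SSE2
    define <8 x i16> @shift8i16(<8 x i16> %a, <8 x i16> %b) {
    entry:
      ; SSE2 has no per-element variable i16 shift, so this scalarizes:
      ; SSE2: cost of 32 {{.*}} lshr
      %0 = lshr <8 x i16> %a, %b
      ret <8 x i16> %0
    }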
|
D | arith.ll |
     86  ; AVX: cost of 2 {{.*}} lshr
     87  ; AVX2: cost of 1 {{.*}} lshr
     88  %B0 = lshr <4 x i32> undef, undef
     89  ; AVX: cost of 2 {{.*}} lshr
     90  ; AVX2: cost of 1 {{.*}} lshr
     91  %B1 = lshr <2 x i64> undef, undef
    113  ; AVX: cost of 2 {{.*}} lshr
    114  ; AVX2: cost of 1 {{.*}} lshr
    115  %B0 = lshr <8 x i32> undef, undef
    116  ; AVX: cost of 2 {{.*}} lshr
    [all …]
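The AVX-to-AVX2 cost drop reflects AVX2's variable vector shift instructions (vpsrlvd/vpsrlvq): a per-element lshr that needs extra work under plain AVX selects to a single instruction under AVX2. A sketch, function name hypothetical:

    define <4 x i32> @var_lshr(<4 x i32> %a, <4 x i32> %b) {
    entry:
      ; with -mattr=+avx2 this is one vpsrlvd
      %r = lshr <4 x i32> %a, %b
      ret <4 x i32> %r
    }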
|
/external/llvm/test/CodeGen/X86/ |
D | vshift-2.ll |
     10  %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
     11  store <2 x i64> %lshr, <2 x i64>* %dst
     22  %lshr = lshr <2 x i64> %val, %1
     23  store <2 x i64> %lshr, <2 x i64>* %dst
     31  %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
     32  store <4 x i32> %lshr, <4 x i32>* %dst
     45  %lshr = lshr <4 x i32> %val, %3
     46  store <4 x i32> %lshr, <4 x i32>* %dst
     55  %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
     56  store <8 x i16> %lshr, <8 x i16>* %dst
     [all …]
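Pre-AVX2 x86 vector shifts (psrlw/psrld/psrlq) take a single count for all lanes, so vshift-2.ll exercises splat amounts in both forms: literal constant splats and amounts broadcast from a scalar. A minimal sketch of the constant-splat case, mirroring lines 31-32 above:

    define void @shift_splat(<4 x i32>* %dst, <4 x i32> %val) {
    entry:
      ; a uniform amount lowers to a single psrld
      %lshr = lshr <4 x i32> %val, <i32 17, i32 17, i32 17, i32 17>
      store <4 x i32> %lshr, <4 x i32>* %dst
      ret void
    }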
|
D | lower-vec-shift.ll |
     12  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
     13  ret <8 x i16> %lshr
     29  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
     30  ret <8 x i16> %lshr
     46  %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
     47  ret <4 x i32> %lshr
     61  %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
     62  ret <4 x i32> %lshr
     76  %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
     77  ret <8 x i16> %lshr
     [all …]
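lower-vec-shift.ll covers the in-between case: constant but non-uniform amounts with only two distinct values, which the backend can lower as two uniform shifts plus a blend instead of scalarizing. A sketch:

    define <4 x i32> @two_amounts(<4 x i32> %a) {
    entry:
      ; expect: shift by 3, shift by 2, then blend the halves
      %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
      ret <4 x i32> %lshr
    }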
|
D | 2013-01-09-DAGCombineBug.ll |
     51  …t (i64 xor (i64 zext (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i12…
     57  …lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl…
     60  …lshr (i192 or (i192 and (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr…
|
D | rot64.ll |
     12  %2 = lshr i64 %x, %1
     21  %2 = lshr i64 %x, %1
     28  %0 = lshr i64 %x, %z
     37  %0 = lshr i64 %y, %z
     48  %0 = lshr i64 %x, 57
     59  %a = lshr i64 %x, 57
     68  %1 = lshr i64 %x, 57
     77  %0 = lshr i64 %x, 7
     88  %a = lshr i64 %x, 7
     96  %0 = lshr i64 %y, 7
|
D | rot32.ll |
     10  %2 = lshr i32 %x, %1
     21  %2 = lshr i32 %x, %1
     30  %0 = lshr i32 %x, %z
     41  %0 = lshr i32 %y, %z
     54  %0 = lshr i32 %x, 25
     65  %a = lshr i32 %x, 25
     76  %1 = lshr i32 %x, 25
     87  %0 = lshr i32 %x, 7
     98  %a = lshr i32 %x, 7
    108  %0 = lshr i32 %y, 7
|
D | h-registers-1.ll |
     22  %sa = lshr i64 %a, 8
     24  %sb = lshr i64 %b, 8
     26  %sc = lshr i64 %c, 8
     28  %sd = lshr i64 %d, 8
     30  %se = lshr i64 %e, 8
     32  %sf = lshr i64 %f, 8
     34  %sg = lshr i64 %g, 8
     36  %sh = lshr i64 %h, 8
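h-registers-1.ll shifts eight values right by 8 so the x86 backend can read the result straight out of a high-byte register (%ah, %bh, ...) instead of emitting real shifts. A one-value sketch of the pattern; the `and i64 ..., 255` is an assumption about the masking the full test applies:

    define i64 @high_byte(i64 %a) {
    entry:
      %sa = lshr i64 %a, 8
      %ta = and i64 %sa, 255   ; selectable as a movzbl-from-%ah access
      ret i64 %ta
    }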
|
D | rot16.ll |
      9  %2 = lshr i16 %x, %1
     20  %2 = lshr i16 %x, %1
     29  %0 = lshr i16 %x, %z
     40  %0 = lshr i16 %y, %z
     51  %0 = lshr i16 %x, 11
     62  %1 = lshr i16 %x, 11
     71  %0 = lshr i16 %x, 5
     81  %0 = lshr i16 %y, 5
|
D | rotate.ll |
      9  %C = lshr i32 %A, %shift.upgrd.2 ; <i32> [#uses=1]
     16  %B = lshr i32 %A, %shift.upgrd.3 ; <i32> [#uses=1]
     26  %C = lshr i32 %A, 27 ; <i32> [#uses=1]
     32  %B = lshr i32 %A, 5 ; <i32> [#uses=1]
     43  %C = lshr i16 %A, %shift.upgrd.6 ; <i16> [#uses=1]
     50  %B = lshr i16 %A, %shift.upgrd.7 ; <i16> [#uses=1]
     60  %C = lshr i16 %A, 11 ; <i16> [#uses=1]
     66  %B = lshr i16 %A, 5 ; <i16> [#uses=1]
     75  %C = lshr i8 %A, %Amt2 ; <i8> [#uses=1]
     81  %B = lshr i8 %A, %Amt ; <i8> [#uses=1]
     [all …]
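rot16.ll, rot32.ll, rot64.ll and rotate.ll all feed the DAG combiner the same shl/lshr/or idiom, which should be recognized as a rotate (rol/ror on x86); note the complementary shift amounts above (5 and 27 for i32, 5 and 11 for i16, 7 and 57 for i64). A minimal 32-bit sketch of the variable-amount form, matching the `%1 = sub i32 32, %z` / `%2 = lshr i32 %x, %1` pairs in rot32.ll:

    define i32 @rotl32(i32 %x, i32 %z) {
    entry:
      %0 = shl i32 %x, %z
      %1 = sub i32 32, %z
      %2 = lshr i32 %x, %1
      %3 = or i32 %0, %2   ; combined into a single rol
      ret i32 %3
    }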
|
D | cmp.ll |
    192  %s = lshr i32 %mask, 7
    204  %bf.lshr = lshr i32 %bf.load, 16
    205  %cmp2 = icmp eq i32 %bf.lshr, 0
    206  %cmp5 = icmp uge i32 %bf.lshr, %n
    216  %lshr = lshr i16 %L, 15
    217  %trunc = trunc i16 %lshr to i8
    227  %lshr = lshr i32 %L, 31
    228  %trunc = trunc i32 %lshr to i8
    238  %lshr = lshr i64 %L, 63
    239  %trunc = trunc i64 %lshr to i8
    [all …]
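The lshr-by-15/31/63 plus trunc chains at lines 216-239 extract just the sign bit; cmp.ll appears to check that a compare of that chain becomes a direct sign test rather than a shift. A sketch of the 32-bit shape, with the icmp consumer an assumption:

    define i1 @sign_bit_set(i32 %L) {
    entry:
      %lshr = lshr i32 %L, 31
      %trunc = trunc i32 %lshr to i8
      %cmp = icmp ne i8 %trunc, 0   ; expect a test of the sign, no shr
      ret i1 %cmp
    }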
|
/external/llvm/test/Transforms/InstCombine/ |
D | bitreverse-recognize.ll |
     17  %8 = lshr i8 %a, 1
     19  %10 = lshr i8 %a, 3
     21  %12 = lshr i8 %a, 5
     23  %14 = lshr i8 %a, 7
     47  %8 = lshr i8 %a, 1
     49  %10 = lshr i8 %a, 3
     51  %12 = lshr i8 %a, 5
     53  %14 = lshr i8 %a, 7
     83  %16 = lshr i16 %a, 1
     85  %18 = lshr i16 %a, 3
     [all …]
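These tests hand InstCombine an open-coded bit reversal and expect a single llvm.bitreverse call back. A reconstruction of the 8-bit shape consistent with the %8/%10/%12/%14 lshr lines above; the masks and the or-chain are inferred, not copied from the test:

    define i8 @rev8(i8 %a) {
      %1 = shl i8 %a, 7      ; bit 0 -> bit 7
      %2 = shl i8 %a, 5
      %3 = and i8 %2, 64     ; bit 1 -> bit 6
      %4 = shl i8 %a, 3
      %5 = and i8 %4, 32     ; bit 2 -> bit 5
      %6 = shl i8 %a, 1
      %7 = and i8 %6, 16     ; bit 3 -> bit 4
      %8 = lshr i8 %a, 1
      %9 = and i8 %8, 8      ; bit 4 -> bit 3
      %10 = lshr i8 %a, 3
      %11 = and i8 %10, 4    ; bit 5 -> bit 2
      %12 = lshr i8 %a, 5
      %13 = and i8 %12, 2    ; bit 6 -> bit 1
      %14 = lshr i8 %a, 7    ; bit 7 -> bit 0
      %15 = or i8 %1, %3
      %16 = or i8 %15, %5
      %17 = or i8 %16, %7
      %18 = or i8 %17, %9
      %19 = or i8 %18, %11
      %20 = or i8 %19, %13
      %21 = or i8 %20, %14   ; folds to llvm.bitreverse.i8(%a)
      ret i8 %21
    }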
|
D | shift.ll |
     39  %B = lshr i32 %A, 32 ;; shift all bits out
     46  %B = lshr <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
     53  %B = lshr <4 x i32> %A, zeroinitializer
     60  %B = lshr <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
    140  %C = lshr i8 %B, 7 ; <i8> [#uses=1]
    151  %B = lshr i8 %A, 7 ; <i8> [#uses=1]
    156  ;; Allow the simplification when the lshr shift is exact.
    160  %B = lshr exact i8 %A, 7
    173  %B = lshr i8 %a, 3 ; <i8> [#uses=1]
    178  ;; Allow the simplification in InstCombine when the lshr shift is exact.
    [all …]
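The "shift all bits out" cases rest on the LangRef rule that an lshr by an amount equal to or larger than the bit width yields an undefined result rather than zero, so InstCombine may fold the instruction away entirely. A sketch of the scalar case at line 39:

    define i32 @overshift(i32 %A) {
    entry:
      ;; amount >= bit width: result is undefined, folds to undef
      %B = lshr i32 %A, 32
      ret i32 %B
    }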
|
D | icmp-shr.ll |
      8  %shr = lshr i8 127, %a
     24  %shr = lshr i8 127, %a
     56  %shr = lshr i8 127, %a
     64  %shr = lshr i8 127, %a
     88  %shr = lshr exact i8 126, %a
     96  %shr = lshr exact i8 126, %a
    104  %shr = lshr exact i8 -128, %a
    112  %shr = lshr i8 -128, %a
    120  %shr = lshr exact i8 -128, %a
    128  %shr = lshr i8 -128, %a
    [all …]
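Shifting a known constant right by a variable leaves only a handful of possible results, so an icmp against that result can be rewritten as a compare of the shift amount itself; icmp-shr.ll enumerates this fold across predicates and the exact flag. A sketch, with the folded form in the comment stated as an expectation rather than copied CHECK output:

    define i1 @cmp_shr_eq_0(i8 %a) {
    entry:
      ; 127 >> %a is 0 only for %a == 7 (valid amounts are 0-7),
      ; so this folds to a direct compare of %a
      %shr = lshr i8 127, %a
      %cmp = icmp eq i8 %shr, 0
      ret i1 %cmp
    }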
|
D | bswap.ll |
      7  %tmp1 = lshr i32 %i, 24 ; <i32> [#uses=1]
      8  %tmp3 = lshr i32 %i, 8 ; <i32> [#uses=1]
     24  %tmp8 = lshr i32 %arg, 8 ; <i32> [#uses=1]
     27  %tmp12 = lshr i32 %arg, 24 ; <i32> [#uses=1]
     33  %tmp2 = lshr i16 %s, 8 ; <i16> [#uses=1]
     40  %tmp2 = lshr i16 %s, 8 ; <i16> [#uses=1]
     65  %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
     70  %tmp6 = lshr i32 %x, 24 ; <i32> [#uses=1]
     78  %shr = lshr i32 %x, 16
     82  %and4 = lshr i32 %or, 8
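bswap.ll feeds InstCombine the open-coded byte swap and expects a single llvm.bswap call. The canonical 32-bit shape behind the lshr 8/24 lines above; value names are hypothetical:

    define i32 @swap32(i32 %i) {
    entry:
      %b3 = lshr i32 %i, 24          ; byte 3 -> byte 0
      %b2 = lshr i32 %i, 8
      %b2m = and i32 %b2, 65280      ; byte 2 -> byte 1 (mask 0xFF00)
      %b1 = shl i32 %i, 8
      %b1m = and i32 %b1, 16711680   ; byte 1 -> byte 2 (mask 0xFF0000)
      %b0 = shl i32 %i, 24           ; byte 0 -> byte 3
      %t0 = or i32 %b3, %b2m
      %t1 = or i32 %t0, %b1m
      %r = or i32 %t1, %b0           ; folds to llvm.bswap.i32(%i)
      ret i32 %r
    }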
|
D | compare-signs.ll |
     28  ; CHECK: [[SHIFT:%.*]] = lshr i32 [[XOR1]], 31
     30  %0 = lshr i32 %a, 31 ; <i32> [#uses=1]
     31  %1 = lshr i32 %b, 31 ; <i32> [#uses=1]
     46  ; CHECK: lshr i32 %0, 31
     48  %0 = lshr i32 %a, 29 ; <i32> [#uses=1]
     49  %1 = lshr i32 %b, 29 ; <i32> [#uses=1]
     67  %r = lshr i32 %na, 31
     80  %r = lshr i64 %na, 63
     93  %r = lshr i64 %na, 63
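Lines 28-31 test the "same sign" fold: comparing `%a >> 31` against `%b >> 31` for equality is rewritten to a single shift of the xor, as the CHECK pattern shows. The input shape, reconstructed around those lines:

    define i32 @same_sign(i32 %a, i32 %b) {
    entry:
      %0 = lshr i32 %a, 31
      %1 = lshr i32 %b, 31
      %2 = icmp eq i32 %0, %1
      %3 = zext i1 %2 to i32   ; becomes ((%a ^ %b) >> 31) ^ 1
      ret i32 %3
    }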
|
/external/llvm/test/CodeGen/PowerPC/ |
D | fast-isel-shifter.ll |
     19  define i32 @lshr() nounwind {
     21  ; ELF64: lshr
     23  %lshr = lshr i32 -1, 2
     24  ret i32 %lshr
     31  %lshr = lshr i32 %src1, %src2
     32  ret i32 %lshr
|
/external/llvm/test/CodeGen/ARM/ |
D | fast-isel-shifter.ll |
     20  define i32 @lshr() nounwind ssp {
     22  ; ARM: lshr
     24  %lshr = lshr i32 -1, 2
     25  ret i32 %lshr
     32  %lshr = lshr i32 %src1, %src2
     33  ret i32 %lshr
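Both fast-isel-shifter.ll files probe the same two cases through FastISel: a shift by an immediate and a register-register shift, expecting lsr on ARM and srw on PowerPC. A condensed sketch of the ARM pair; function names and the exact expected operands are assumptions:

    define i32 @lshr_imm() nounwind ssp {
    entry:
      ; expect the immediate form, e.g. lsr r0, r0, #2
      %lshr = lshr i32 -1, 2
      ret i32 %lshr
    }

    define i32 @lshr_reg(i32 %src1, i32 %src2) nounwind ssp {
    entry:
      ; expect the register form, e.g. lsr r0, r0, r1
      %lshr = lshr i32 %src1, %src2
      ret i32 %lshr
    }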
|
D | uxtb.ll |
     10  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
     16  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
     22  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
     28  %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
     34  %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
     43  %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
     54  %tmp5 = lshr i32 %x, 24 ; <i32> [#uses=1]
     60  %tmp1 = lshr i32 %x, 24 ; <i32> [#uses=1]
     68  %tmp1 = lshr i32 %p0, 7 ; <i32> [#uses=1]
     70  %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
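Each lshr in uxtb.ll is paired with a byte mask so the ARM backend can fold shift and mask into one v6 extend instruction; for example, and-ing an 8-bit-rotated value with 0x00FF00FF maps to uxtb16 with a ror operand. A sketch of that case; the mask line is an assumption about the elided and instructions:

    define i32 @extract_bytes(i32 %x) {
    entry:
      %tmp1 = lshr i32 %x, 8
      %tmp2 = and i32 %tmp1, 16711935   ; expect: uxtb16 r0, r0, ror #8
      ret i32 %tmp2
    }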
|
/external/llvm/test/CodeGen/Thumb/ |
D | 2009-08-12-ConstIslandAssert.ll |
     16  %6 = lshr i32 %4, 24 ; <i32> [#uses=1]
     19  %9 = lshr i32 %4, 16 ; <i32> [#uses=1]
     37  %27 = lshr i32 %24, 24 ; <i32> [#uses=1]
     40  %30 = lshr i32 %24, 16 ; <i32> [#uses=1]
     46  %36 = lshr i32 %24, 8 ; <i32> [#uses=1]
     61  %51 = lshr i32 %48, 24 ; <i32> [#uses=1]
     64  %54 = lshr i32 %48, 16 ; <i32> [#uses=1]
     70  %60 = lshr i32 %48, 8 ; <i32> [#uses=1]
     84  %74 = lshr i32 %72, 24 ; <i32> [#uses=1]
     87  %77 = lshr i32 %72, 16 ; <i32> [#uses=1]
     [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | shift-06.ll |
     10  %shift = lshr i64 %a, 1
     19  %shift = lshr i64 %a, 63
     28  %shift = lshr i64 %a, 64
     37  %shift = lshr i64 %a, %amt
     47  %shift = lshr i64 %a, %add
     58  %shift = lshr i64 %a, %addext
     69  %shift = lshr i64 %a, %addext
     80  %shift = lshr i64 %a, %add
     92  %shift = lshr i64 %a, %add
    102  %shift = lshr i64 %a, %sub
    [all …]
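SystemZ encodes a shift amount the way it encodes an address, as a base register plus a displacement; the %add/%addext/%sub variants above check that arithmetic feeding the amount folds into srlg's displacement field instead of costing a separate instruction. A sketch; the constant 10 is illustrative:

    define i64 @shift_with_disp(i64 %a, i64 %amt) {
    entry:
      %add = add i64 %amt, 10
      ; expect: srlg %r2, %r2, 10(%r3), with no explicit add
      %shift = lshr i64 %a, %add
      ret i64 %shift
    }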
|
D | shift-02.ll |
     10  %shift = lshr i32 %a, 1
     19  %shift = lshr i32 %a, 31
     28  %shift = lshr i32 %a, 32
     38  %shift = lshr i32 %a, %sub
     47  %shift = lshr i32 %a, %amt
     57  %shift = lshr i32 %a, %add
     68  %shift = lshr i32 %a, %trunc
     79  %shift = lshr i32 %a, %add
     90  %shift = lshr i32 %a, %add
    101  %shift = lshr i32 %a, %add
    [all …]
|
D | shift-04.ll |
     11  %partb = lshr i32 %a, 31
     22  %partb = lshr i32 %a, 1
     33  %partb = lshr i32 %a, 0
     45  %partb = lshr i32 %a, %amtb
     58  %partb = lshr i32 %a, %sub
     72  %partb = lshr i32 %a, %sub
     87  %partb = lshr i32 %a, %subtrunc
    101  %partb = lshr i32 %a, %sub
    116  %partb = lshr i32 %a, %sub
    129  %partb = lshr i32 %a, %subb
    [all …]
|
D | int-mul-08.ll |
     16  %highx = lshr i128 %mulx, 64
     36  %highx = lshr i128 %mulx, 64
     52  %highx = lshr i128 %mulx, 67
     68  %highx = lshr i128 %mulx, 64
     95  %highx = lshr i128 %mulx, 64
    110  %highx = lshr i128 %mulx, 64
    127  %highx = lshr i128 %mulx, 64
    142  %highx = lshr i128 %mulx, 64
    157  %highx = lshr i128 %mulx, 64
    174  %highx = lshr i128 %mulx, 64
    [all …]
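int-mul-08.ll repeats the widen-multiply-then-shift idiom for taking the high 64 bits of a 64x64 product; with both operands zero-extended this should select mlgr, and the lone shift by 67 above is presumably a negative test, since only a shift of exactly 64 exposes the high-part register. The core shape, using the %mulx/%highx names from the snippet:

    define i64 @mul_high(i64 %a, i64 %b) {
    entry:
      %ax = zext i64 %a to i128
      %bx = zext i64 %b to i128
      %mulx = mul i128 %ax, %bx
      ; shifting by exactly 64 lets the backend use mlgr's high half
      %highx = lshr i128 %mulx, 64
      %high = trunc i128 %highx to i64
      ret i64 %high
    }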
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | pr23510.ll |
      8  ; CHECK: lshr <2 x i64>
      9  ; CHECK: lshr <2 x i64>
     16  %shr = lshr i64 %tmp, 4
     20  %shr2 = lshr i64 %tmp1, 4
     28  %shr5 = lshr i64 %tmp4, 4
     31  %shr7 = lshr i64 %tmp5, 4
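The scalar shifts in pr23510.ll come in pairs over adjacent memory, which is what the SLP vectorizer needs to fuse each pair into the `lshr <2 x i64>` the CHECK lines demand. A reduced sketch of one pair; pointer and value names are hypothetical:

    define void @shift_pair(i64* %p) {
    entry:
      %q = getelementptr inbounds i64, i64* %p, i64 1
      %a = load i64, i64* %p, align 8
      %b = load i64, i64* %q, align 8
      %shr = lshr i64 %a, 4
      %shr2 = lshr i64 %b, 4
      ; SLP packs the loads, shifts, and stores into <2 x i64> ops
      store i64 %shr, i64* %p, align 8
      store i64 %shr2, i64* %q, align 8
      ret void
    }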
|