/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/AggressiveInstCombine/ |
D | masked-cmp.ll |
    13  %s = lshr i32 %x, 3
    26  %t1 = lshr i32 %x, 3
    27  %t2 = lshr i32 %x, 5
    28  %t3 = lshr i32 %x, 8
    45  %t1 = lshr i32 %x, 3
    46  %t2 = lshr i32 %x, 5
    47  %t3 = lshr i32 %x, 8
    63  %s = lshr i32 %x, 7
    76  %t1 = lshr i64 %x, 1
    77  %t2 = lshr i64 %x, 2
    [all …]
|
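The matches above are fragments of a bit-test chain. As a rough sketch (the function name, the exact bits tested, and the expected folded form are illustrative, not taken from the test), the kind of pattern AggressiveInstCombine is expected to collapse looks like this:

; Sketch only: each bit is extracted with lshr+and, the results are or'd
; together, and the whole chain is expected to fold into one masked compare
; such as (x & 296) != 0, where 296 = bit 3 | bit 5 | bit 8.
define i1 @any_bit_set_sketch(i32 %x) {
  %t1 = lshr i32 %x, 3
  %t2 = lshr i32 %x, 5
  %t3 = lshr i32 %x, 8
  %a1 = and i32 %t1, 1
  %a2 = and i32 %t2, 1
  %a3 = and i32 %t3, 1
  %o1 = or i32 %a1, %a2
  %o2 = or i32 %o1, %a3
  %r = icmp ne i32 %o2, 0
  ret i1 %r
}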
/external/llvm/test/Analysis/CostModel/X86/ |
D | testshiftlshr.ll |
    8   ; SSE2: cost of 4 {{.*}} lshr
    12  %0 = lshr %shifttype %a , %b
    20  ; SSE2: cost of 16 {{.*}} lshr
    24  %0 = lshr %shifttype4i16 %a , %b
    32  ; SSE2: cost of 32 {{.*}} lshr
    36  %0 = lshr %shifttype8i16 %a , %b
    44  ; SSE2: cost of 64 {{.*}} lshr
    48  %0 = lshr %shifttype16i16 %a , %b
    56  ; SSE2: cost of 128 {{.*}} lshr
    60  %0 = lshr %shifttype32i16 %a , %b
    [all …]
|
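For context, one case in this style of cost-model test pairs an IR shift with a FileCheck line giving the expected cost. The RUN line, triple, and the guess that %shifttype aliases <2 x i16> are illustrative here and not copied from the file; only the "cost of 4" value comes from the matches above.

; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; Assumption: %shifttype is a small vector alias; <2 x i16> is a placeholder.
%shifttype = type <2 x i16>
define %shifttype @shift2i16(%shifttype %a, %shifttype %b) {
entry:
  ; SSE2: cost of 4 {{.*}} lshr
  %0 = lshr %shifttype %a , %b
  ret %shifttype %0
}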
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/ |
D | testshiftlshr.ll |
    8   ; SSE2: cost of 4 {{.*}} lshr
    12  %0 = lshr %shifttype %a , %b
    20  ; SSE2: cost of 16 {{.*}} lshr
    24  %0 = lshr %shifttype4i16 %a , %b
    32  ; SSE2: cost of 32 {{.*}} lshr
    36  %0 = lshr %shifttype8i16 %a , %b
    44  ; SSE2: cost of 64 {{.*}} lshr
    48  %0 = lshr %shifttype16i16 %a , %b
    56  ; SSE2: cost of 128 {{.*}} lshr
    60  %0 = lshr %shifttype32i16 %a , %b
    [all …]
|
D | vshift-lshr-cost.ll |
    27  ; SSE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shift = lshr <2 x i64> %a, …
    31  ; AVX1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shift = lshr <2 x i64> %a,…
    35  ; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shift = lshr <2 x i64> %a,…
    39  ; XOPAVX1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shift = lshr <2 x i64> …
    43  ; XOPAVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shift = lshr <2 x i64> %…
    47  ; AVX512-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shift = lshr <2 x i64> %…
    51  ; BTVER2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shift = lshr <2 x i64> %…
    54  %shift = lshr <2 x i64> %a, %b
    60  ; SSE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %shift = lshr <4 x i64> %a, …
    64  ; AVX1-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %shift = lshr <4 x i64> %a…
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | knownbits-recursion.ll |
    26  %tmp9 = lshr i32 %tmp8, 16
    28  %tmp11 = lshr i32 %tmp10, 16
    30  %tmp13 = lshr i32 %tmp12, 16
    32  %tmp15 = lshr i32 %tmp14, 16
    34  %tmp17 = lshr i32 %tmp16, 16
    36  %tmp19 = lshr i32 %tmp18, 16
    38  %tmp21 = lshr i32 %tmp20, 16
    40  %tmp23 = lshr i32 %tmp22, 16
    42  %tmp25 = lshr i32 %tmp24, 16
    44  %tmp27 = lshr i32 %tmp26, 16
    [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | vshift-2.ll |
    10  %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
    11  store <2 x i64> %lshr, <2 x i64>* %dst
    22  %lshr = lshr <2 x i64> %val, %1
    23  store <2 x i64> %lshr, <2 x i64>* %dst
    31  %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
    32  store <4 x i32> %lshr, <4 x i32>* %dst
    45  %lshr = lshr <4 x i32> %val, %3
    46  store <4 x i32> %lshr, <4 x i32>* %dst
    55  %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
    56  store <8 x i16> %lshr, <8 x i16>* %dst
    [all …]
|
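These matches are fragments of small shift-then-store functions. A self-contained sketch of the constant-splat case follows; the comment about the expected psrld lowering is my reading of what an X86 vshift test checks, not a quoted CHECK line.

define void @shift_splat_sketch(<4 x i32> %val, <4 x i32>* %dst) nounwind {
entry:
  ; A uniform immediate shift like this should lower to a single vector
  ; shift (psrld with an immediate on x86/SSE2) rather than being scalarized.
  %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
  store <4 x i32> %lshr, <4 x i32>* %dst
  ret void
}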
/external/llvm/test/CodeGen/X86/ |
D | vshift-2.ll |
    10  %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
    11  store <2 x i64> %lshr, <2 x i64>* %dst
    22  %lshr = lshr <2 x i64> %val, %1
    23  store <2 x i64> %lshr, <2 x i64>* %dst
    31  %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
    32  store <4 x i32> %lshr, <4 x i32>* %dst
    45  %lshr = lshr <4 x i32> %val, %3
    46  store <4 x i32> %lshr, <4 x i32>* %dst
    55  %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
    56  store <8 x i16> %lshr, <8 x i16>* %dst
    [all …]
|
D | lower-vec-shift.ll |
    12  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    13  ret <8 x i16> %lshr
    29  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
    30  ret <8 x i16> %lshr
    46  %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
    47  ret <4 x i32> %lshr
    61  %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
    62  ret <4 x i32> %lshr
    76  %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    77  ret <8 x i16> %lshr
    [all …]
|
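The shift amounts above come in two uniform groups (a few lanes by 3, the rest by 2). A sketch of that input shape follows; the comment about how it is expected to lower (two whole-vector shifts merged with a blend instead of per-lane scalarization) is my reading of the test's intent, not a quoted CHECK line.

define <8 x i16> @two_group_lshr_sketch(<8 x i16> %a) {
  ; Amounts split into two uniform groups, so the backend can emit two
  ; uniform vector shifts and blend the results together.
  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  ret <8 x i16> %lshr
}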
D | 2013-01-09-DAGCombineBug.ll |
    51  …t (i64 xor (i64 zext (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i12…
    57  …lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl…
    60  …lshr (i192 or (i192 and (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr…
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | canonicalize-lshr-shl-to-masking.ll |
    22  %tmp0 = lshr i32 %x, %y
    32  %tmp0 = lshr i32 %x, 5
    39  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i32 [[X:%.*]], 10
    43  %tmp0 = lshr i32 %x, 10
    50  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i32 [[X:%.*]], 5
    54  %tmp0 = lshr i32 %x, 5
    67  %tmp0 = lshr exact i32 %x, %y
    76  %tmp0 = lshr exact i32 %x, 5
    83  ; CHECK-NEXT: [[RET:%.*]] = lshr exact i32 [[X:%.*]], 5
    86  %tmp0 = lshr exact i32 %x, 10
    [all …]
|
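The file name describes the fold being tested: a right shift immediately undone by a left shift of the same amount becomes a mask. A minimal sketch, with an illustrative function; the -32 constant in the comment is the mask for a shift amount of 5 (low five bits cleared).

define i32 @lshr_then_shl_sketch(i32 %x) {
  %tmp0 = lshr i32 %x, 5
  ; shl (lshr X, C), C --> and X, C'   (here C' = -32)
  %ret = shl i32 %tmp0, 5
  ret i32 %ret
}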
D | canonicalize-shl-lshr-to-masking.ll |
    17  ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 -1, [[Y:%.*]]
    22  %ret = lshr i32 %tmp0, %y
    32  %ret = lshr i32 %tmp0, 5
    43  %ret = lshr i32 %tmp0, 5
    49  ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 5
    54  %ret = lshr i32 %tmp0, 10
    60  ; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 5
    65  %ret = lshr exact i32 %tmp0, 10
    78  %ret = lshr i32 %tmp0, %y ; this one is obviously 'exact'.
    87  %ret = lshr i32 %tmp0, 5 ; this one is obviously 'exact'.
    [all …]
|
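This file covers the mirrored case: a left shift undone by a right shift keeps only the low bits. Another illustrative sketch; 134217727 is (2^27 - 1), the mask that survives a 5-bit round trip on i32.

define i32 @shl_then_lshr_sketch(i32 %x) {
  %tmp0 = shl i32 %x, 5
  ; lshr (shl X, C), C --> and X, (-1 u>> C), i.e. and i32 %x, 134217727
  %ret = lshr i32 %tmp0, 5
  ret i32 %ret
}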
D | lshr.ll |
    20  %sh = lshr i32 %ct, 5
    31  %sh = lshr i32 %ct, 5
    42  %sh = lshr i32 %ct, 5
    53  %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
    64  %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
    75  %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
    83  ; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i8 [[ADD]], 2
    88  %lshr = lshr i8 %add, 2
    89  ret i8 %lshr
    96  ; CHECK-NEXT: [[LSHR:%.*]] = lshr exact <2 x i8> [[ADD]], <i8 2, i8 2>
    [all …]
|
D | apint-shift.ll |
    64  %C = lshr i17 %B, 16
    68  ; shl (lshr X, C), C --> and X, C'
    75  %sh1 = lshr i19 %X, 18
    81  ; lshr (lshr X, C1), C2 --> lshr X, C1 + C2
    85  ; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i19> %X, <i19 5, i19 5>
    88  %sh1 = lshr <2 x i19> %X, <i19 3, i19 3>
    89  %sh2 = lshr <2 x i19> %sh1, <i19 2, i19 2>
    95  ; CHECK-NEXT: [[SH1:%.*]] = lshr i9 %x, 2
    96  ; CHECK-NEXT: [[SH2:%.*]] = lshr i9 %x, 5
    100 %sh1 = lshr i9 %x, 2
    [all …]
|
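The two comment lines captured above state the folds directly. Writing the vector case from lines 85-89 out as a complete function (the wrapper function itself is a sketch) shows why the CHECK line expects a single shift by 5:

define <2 x i19> @lshr_lshr_sketch(<2 x i19> %X) {
  ; lshr (lshr X, C1), C2 --> lshr X, C1 + C2   (3 + 2 = 5 here)
  %sh1 = lshr <2 x i19> %X, <i19 3, i19 3>
  %sh2 = lshr <2 x i19> %sh1, <i19 2, i19 2>
  ret <2 x i19> %sh2
}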
D | canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll |
    17  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
    21  %tmp0 = lshr i8 -1, %y
    33  ; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
    37  %tmp0 = lshr <2 x i8> <i8 -1, i8 -1>, %y
    45  ; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
    49  %tmp0 = lshr <3 x i8> <i8 -1, i8 undef, i8 -1>, %y
    63  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
    68  %tmp0 = lshr i8 -1, %y
    77  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
    82  %tmp0 = lshr i8 -1, %y
    [all …]
|
D | canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll |
    17  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
    21  %tmp0 = lshr i8 -1, %y
    33  ; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
    37  %tmp0 = lshr <2 x i8> <i8 -1, i8 -1>, %y
    45  ; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
    49  %tmp0 = lshr <3 x i8> <i8 -1, i8 undef, i8 -1>, %y
    63  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
    68  %tmp0 = lshr i8 -1, %y
    77  ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
    82  %tmp0 = lshr i8 -1, %y
    [all …]
|
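Both files above build a low-bit mask as -1 logically shifted right by a variable amount and then compare x against the masked value; per the file names, the eq form canonicalizes to an unsigned ule compare and the ne form to ugt. A sketch of the eq variant, with illustrative names:

define i1 @low_bit_mask_eq_sketch(i8 %x, i8 %y) {
  %tmp0 = lshr i8 -1, %y          ; low-bit mask of the form 000...0111...1
  %tmp1 = and i8 %tmp0, %x
  ; x & mask == x  is expected to become  icmp ule i8 %x, %tmp0
  %ret = icmp eq i8 %tmp1, %x
  ret i1 %ret
}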
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | aggr-licm.ll |
    17  %v0 = lshr i64 %a0, 1
    36  %v16 = lshr i64 %v4, 2
    37  %v17 = lshr i64 %v3, 2
    48  %v28 = lshr i64 %v4, 4
    49  %v29 = lshr i64 %v3, 4
    60  %v40 = lshr i64 %v4, 6
    61  %v41 = lshr i64 %v3, 6
    72  %v52 = lshr i64 %v4, 8
    73  %v53 = lshr i64 %v3, 8
    84  %v64 = lshr i64 %v4, 10
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | 2013-01-09-DAGCombineBug.ll |
    51  …t (i64 xor (i64 zext (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i12…
    57  …lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl…
    60  …lshr (i192 or (i192 and (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr…
|
D | vshift-2.ll |
    22   %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
    23   store <2 x i64> %lshr, <2 x i64>* %dst
    46   %lshr = lshr <2 x i64> %val, %1
    47   store <2 x i64> %lshr, <2 x i64>* %dst
    65   %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
    66   store <4 x i32> %lshr, <4 x i32>* %dst
    90   %lshr = lshr <4 x i32> %val, %3
    91   store <4 x i32> %lshr, <4 x i32>* %dst
    110  %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
    111  store <8 x i16> %lshr, <8 x i16>* %dst
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/PhaseOrdering/ |
D | bitfield-bittests.ll |
    27  %a.sroa.5.0.shift = lshr i32 %a, 8
    30  %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1
    31  %bf.clear2 = and i8 %bf.lshr, 1
    34  %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2
    38  %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3
    55  %a.sroa.5.0.shift = lshr i32 %a, 8
    58  %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1
    59  %bf.clear2 = and i8 %bf.lshr, 1
    62  %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2
    66  %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3
    [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/Thumb/ |
D | 2009-08-12-ConstIslandAssert.ll |
    16  %6 = lshr i32 %4, 24 ; <i32> [#uses=1]
    19  %9 = lshr i32 %4, 16 ; <i32> [#uses=1]
    37  %27 = lshr i32 %24, 24 ; <i32> [#uses=1]
    40  %30 = lshr i32 %24, 16 ; <i32> [#uses=1]
    46  %36 = lshr i32 %24, 8 ; <i32> [#uses=1]
    61  %51 = lshr i32 %48, 24 ; <i32> [#uses=1]
    64  %54 = lshr i32 %48, 16 ; <i32> [#uses=1]
    70  %60 = lshr i32 %48, 8 ; <i32> [#uses=1]
    84  %74 = lshr i32 %72, 24 ; <i32> [#uses=1]
    87  %77 = lshr i32 %72, 16 ; <i32> [#uses=1]
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | shift_mask.ll |
    97   %lshr = lshr i8 %a, %rem
    98   ret i8 %lshr
    109  %lshr = lshr i16 %a, %rem
    110  ret i16 %lshr
    120  %lshr = lshr i32 %a, %rem
    121  ret i32 %lshr
    131  %lshr = lshr i64 %a, %rem
    132  ret i64 %lshr
    141  %lshr = lshr <16 x i8> %a, %rem
    142  ret <16 x i8> %lshr
    [all …]
|
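Each pair of matches above is the tail of a function whose shift amount %rem has already been reduced into range before the lshr. A sketch of one such function; how %rem is produced is an assumption here (shown as an and with the bit width minus one), and the point of such tests is that this explicit reduction is redundant with the target's own shift-amount masking.

define i32 @shift_mask_sketch(i32 %a, i32 %b) {
  ; Assumption: %rem is the raw amount masked below the bit width (31 for i32).
  %rem = and i32 %b, 31
  %lshr = lshr i32 %a, %rem
  ret i32 %lshr
}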
D | fast-isel-shifter.ll |
    19  define i32 @lshr() nounwind {
    21  ; ELF64: lshr
    23  %lshr = lshr i32 -1, 2
    24  ret i32 %lshr
    31  %lshr = lshr i32 %src1, %src2
    32  ret i32 %lshr
|
/external/llvm/test/CodeGen/PowerPC/ |
D | fast-isel-shifter.ll |
    19  define i32 @lshr() nounwind {
    21  ; ELF64: lshr
    23  %lshr = lshr i32 -1, 2
    24  ret i32 %lshr
    31  %lshr = lshr i32 %src1, %src2
    32  ret i32 %lshr
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Thumb/ |
D | 2009-08-12-ConstIslandAssert.ll |
    16  %6 = lshr i32 %4, 24 ; <i32> [#uses=1]
    19  %9 = lshr i32 %4, 16 ; <i32> [#uses=1]
    37  %27 = lshr i32 %24, 24 ; <i32> [#uses=1]
    40  %30 = lshr i32 %24, 16 ; <i32> [#uses=1]
    46  %36 = lshr i32 %24, 8 ; <i32> [#uses=1]
    61  %51 = lshr i32 %48, 24 ; <i32> [#uses=1]
    64  %54 = lshr i32 %48, 16 ; <i32> [#uses=1]
    70  %60 = lshr i32 %48, 8 ; <i32> [#uses=1]
    84  %74 = lshr i32 %72, 24 ; <i32> [#uses=1]
    87  %77 = lshr i32 %72, 16 ; <i32> [#uses=1]
    [all …]
|
/external/llvm/test/CodeGen/Thumb/ |
D | 2009-08-12-ConstIslandAssert.ll |
    16  %6 = lshr i32 %4, 24 ; <i32> [#uses=1]
    19  %9 = lshr i32 %4, 16 ; <i32> [#uses=1]
    37  %27 = lshr i32 %24, 24 ; <i32> [#uses=1]
    40  %30 = lshr i32 %24, 16 ; <i32> [#uses=1]
    46  %36 = lshr i32 %24, 8 ; <i32> [#uses=1]
    61  %51 = lshr i32 %48, 24 ; <i32> [#uses=1]
    64  %54 = lshr i32 %48, 16 ; <i32> [#uses=1]
    70  %60 = lshr i32 %48, 8 ; <i32> [#uses=1]
    84  %74 = lshr i32 %72, 24 ; <i32> [#uses=1]
    87  %77 = lshr i32 %72, 16 ; <i32> [#uses=1]
    [all …]
|