/external/llvm/test/Analysis/CostModel/X86/ |
D | testshiftashr.ll |
      8  ; SSE2: cost of 12 {{.*}} ashr
     12  %0 = ashr %shifttype %a , %b
     20  ; SSE2: cost of 16 {{.*}} ashr
     24  %0 = ashr %shifttype4i16 %a , %b
     32  ; SSE2: cost of 32 {{.*}} ashr
     36  %0 = ashr %shifttype8i16 %a , %b
     44  ; SSE2: cost of 64 {{.*}} ashr
     48  %0 = ashr %shifttype16i16 %a , %b
     56  ; SSE2: cost of 128 {{.*}} ashr
     60  %0 = ashr %shifttype32i16 %a , %b
     [all …]
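Each pair above follows the cost-model test pattern: a FileCheck line stating the expected cost sits immediately before the instruction it measures. A minimal sketch of one such case, mirroring the 8 x i16 lines above (the function name and the type-alias mapping are assumptions inferred from the names, not quoted from the file):

    %shifttype8i16 = type <8 x i16>
    define %shifttype8i16 @shift8i16(%shifttype8i16 %a, %shifttype8i16 %b) {
    entry:
      ; SSE2: cost of 32 {{.*}} ashr
      %0 = ashr %shifttype8i16 %a , %b
      ret %shifttype8i16 %0
    }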
|
D | arith.ll |
    151  ; SSSE3: cost of 16 {{.*}} %C0 = ashr
    152  ; SSE42: cost of 16 {{.*}} %C0 = ashr
    153  ; AVX: cost of 16 {{.*}} %C0 = ashr
    154  ; AVX2: cost of 1 {{.*}} %C0 = ashr
    155  %C0 = ashr <4 x i32> undef, undef
    156  ; SSSE3: cost of 12 {{.*}} %C1 = ashr
    157  ; SSE42: cost of 12 {{.*}} %C1 = ashr
    158  ; AVX: cost of 12 {{.*}} %C1 = ashr
    159  ; AVX2: cost of 4 {{.*}} %C1 = ashr
    160  %C1 = ashr <2 x i64> undef, undef
    [all …]
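The AVX2 column drops because AVX2 added per-element variable shifts: a <4 x i32> ashr can be a single vpsravd, hence the cost of 1 above, while i64 elements have no vpsravq until AVX-512, hence the residual cost of 4. A sketch of the cheap case (the lowering noted in the comment is an expectation, not a quoted CHECK line):

    %C0 = ashr <4 x i32> undef, undef   ; AVX2: one vpsravd, so the model reports cost 1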
|
D | vshift-ashr-cost.ll |
     21  %shift = ashr <2 x i64> %a, %b
     32  %shift = ashr <4 x i64> %a, %b
     44  %shift = ashr <4 x i32> %a, %b
     56  %shift = ashr <8 x i32> %a, %b
     67  %shift = ashr <8 x i16> %a, %b
     78  %shift = ashr <16 x i16> %a, %b
     89  %shift = ashr <16 x i8> %a, %b
    100  %shift = ashr <32 x i8> %a, %b
    116  %shift = ashr <2 x i64> %a, %splat
    128  %shift = ashr <4 x i64> %a, %splat
    [all …]
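The %splat operands in the second group are presumably built with the usual insertelement/shufflevector splat idiom; a sketch under that assumption (names hypothetical):

    %insert = insertelement <4 x i32> undef, i32 %b, i32 0
    %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
    %shift = ashr <4 x i32> %a, %splat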
|
/external/llvm/test/CodeGen/NVPTX/ |
D | sext-in-reg.ll |
     11  %conv1 = ashr exact i64 %sext, 56
     13  %conv4 = ashr exact i64 %sext1, 56
     14  %shr = ashr i64 %a, 16
     15  %shr9 = ashr i64 %b, 16
     29  %conv1 = ashr exact i64 %sext, 32
     31  %conv4 = ashr exact i64 %sext1, 32
     32  %shr = ashr i64 %a, 16
     33  %shr9 = ashr i64 %b, 16
     47  %conv1 = ashr exact i64 %sext, 48
     49  %conv4 = ashr exact i64 %sext1, 48
     [all …]
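The shl/ashr pairs here are the classic sign-extend-in-register idiom: shifting an i64 left by 64-N and arithmetically right by the same amount sign-extends its low N bits. The elided %sext is presumably a shl by the matching amount; for the shift-by-56 cases above, N = 8:

    %sext = shl i64 %a, 56              ; move the low 8 bits to the top
    %conv1 = ashr exact i64 %sext, 56   ; shift back, refilling with copies of the sign bit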
|
/external/llvm/test/CodeGen/X86/ |
D | vshift-3.ll |
      6  ; Note that x86 does have ashr
     12  %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
     13  store <2 x i64> %ashr, <2 x i64>* %dst
     21  %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
     22  store <4 x i32> %ashr, <4 x i32>* %dst
     35  %ashr = ashr <4 x i32> %val, %3
     36  store <4 x i32> %ashr, <4 x i32>* %dst
     44  %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
     45  store <8 x i16> %ashr, <8 x i16>* %dst
     63  %ashr = ashr <8 x i16> %val, %7
     [all …]
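Only the <4 x i32> and <8 x i16> constant shifts map directly onto single SSE2 instructions (psrad/psraw with an immediate); SSE2 has no 64-bit arithmetic right shift, which is what makes the <2 x i64> case at line 12 the interesting one. A sketch of the straightforward case (the lowering in the comment is an expectation, not a quoted CHECK line):

    %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >   ; typically a single psrad $5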
|
D | field-extract-use-trunc.ll |
     13  %tmp7.25 = ashr i32 %f11, 24
     19  %tmp7.25 = ashr i32 %f11, 24
     25  %tmp7.25 = ashr i64 %f11, 32
     31  %tmp7.25 = ashr i16 %f11, 8
     37  %tmp7.25 = ashr i16 %f11, 8
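Each of these shifts extracts a sign-extended top field; the shl/trunc context implied by the file name is elided from the matches. For the i32 cases:

    %tmp7.25 = ashr i32 %f11, 24   ; bits 24..31 of %f11, sign-extended across the i32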
|
D | 2008-05-12-tailmerge-5.ll |
     51  %tmp17 = ashr i32 %tmp16, 23 ; <i32> [#uses=1]
     54  %sextr = ashr i16 %sextl, 7 ; <i16> [#uses=2]
     56  %sextr20 = ashr i16 %sextl19, 7 ; <i16> [#uses=0]
     58  %sextr22 = ashr i16 %sextl21, 7 ; <i16> [#uses=1]
     87  %tmp39 = ashr i16 %tmp38, 7 ; <i16> [#uses=1]
     89  %sextr41 = ashr i16 %sextl40, 7 ; <i16> [#uses=2]
     91  %sextr43 = ashr i16 %sextl42, 7 ; <i16> [#uses=0]
     93  %sextr45 = ashr i16 %sextl44, 7 ; <i16> [#uses=1]
    108  %tmp55 = ashr i16 %tmp54, 7 ; <i16> [#uses=1]
    110  %sextr57 = ashr i16 %sextl56, 7 ; <i16> [#uses=2]
    [all …]
|
D | sar_fold.ll |
      8  %2 = ashr exact i32 %1, 15
     17  %2 = ashr exact i32 %1, 17
     26  %2 = ashr exact i32 %1, 23
     35  %2 = ashr exact i32 %1, 25
|
D | sar_fold64.ll |
      8  %2 = ashr exact i64 %1, 47
     18  %2 = ashr exact i64 %1, 49
     28  %2 = ashr exact i64 %1, 55
     38  %2 = ashr exact i64 %1, 57
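Both sar_fold files exercise the same shl-then-ashr-exact shape, with the right-shift amount one less or one greater than the elided left shift (presumably 16 or 24 for the i32 file, 48 or 56 for the i64 file), so the result is a sign-extended narrow field either doubled or halved. Working through the first 32-bit case under that assumption:

    %1 = shl i32 %x, 16          ; low 16 bits of %x now occupy bits 16..31
    %2 = ashr exact i32 %1, 15   ; (sext of the low i16) doubled; 'exact' holds
                                 ; because only known-zero bits are shifted out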
|
/external/llvm/test/CodeGen/SystemZ/ |
D | int-neg-02.ll |
     99  %ashr = ashr i64 %shl, 32
    100  %neg = sub i64 0, %ashr
    102  %abs = select i1 %cmp, i64 %neg, i64 %ashr
    113  %ashr = ashr i64 %shl, 32
    114  %neg = sub i64 0, %ashr
    116  %abs = select i1 %cmp, i64 %neg, i64 %ashr
    127  %ashr = ashr i64 %shl, 32
    128  %neg = sub i64 0, %ashr
    130  %abs = select i1 %cmp, i64 %ashr, i64 %neg
    141  %ashr = ashr i64 %shl, 32
    [all …]
|
D | int-abs-01.ll |
     91  %ashr = ashr i64 %shl, 32
     92  %neg = sub i64 0, %ashr
     94  %abs = select i1 %cmp, i64 %neg, i64 %ashr
    104  %ashr = ashr i64 %shl, 32
    105  %neg = sub i64 0, %ashr
    107  %abs = select i1 %cmp, i64 %neg, i64 %ashr
    117  %ashr = ashr i64 %shl, 32
    118  %neg = sub i64 0, %ashr
    120  %abs = select i1 %cmp, i64 %ashr, i64 %neg
    130  %ashr = ashr i64 %shl, 32
    [all …]
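int-neg-02.ll and int-abs-01.ll share the same three-instruction group: sign-extend via shl/ashr, negate, then select, which is the standard integer-abs idiom applied to a value sign-extended from 32 bits. The compare on the elided lines is presumably a signed test against zero; a sketch under that assumption:

    %shl = shl i64 %val, 32
    %ashr = ashr i64 %shl, 32             ; sign-extend the low 32 bits of %val
    %neg = sub i64 0, %ashr
    %cmp = icmp slt i64 %ashr, 0          ; assumed: the compare elided from the matches
    %abs = select i1 %cmp, i64 %neg, i64 %ashr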
|
D | shift-07.ll |
     10  %shift = ashr i64 %a, 1
     19  %shift = ashr i64 %a, 63
     28  %shift = ashr i64 %a, 64
     37  %shift = ashr i64 %a, %amt
     47  %shift = ashr i64 %a, %add
     58  %shift = ashr i64 %a, %addext
     69  %shift = ashr i64 %a, %addext
     80  %shift = ashr i64 %a, %add
     92  %shift = ashr i64 %a, %add
    102  %shift = ashr i64 %a, %sub
    [all …]
|
D | shift-03.ll |
     10  %shift = ashr i32 %a, 1
     19  %shift = ashr i32 %a, 31
     28  %shift = ashr i32 %a, 32
     38  %shift = ashr i32 %a, %sub
     47  %shift = ashr i32 %a, %amt
     57  %shift = ashr i32 %a, %add
     68  %shift = ashr i32 %a, %trunc
     79  %shift = ashr i32 %a, %add
     90  %shift = ashr i32 %a, %add
    101  %shift = ashr i32 %a, %add
    [all …]
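Both SystemZ shift files probe the same boundaries: the largest defined amount (bit width minus one) and one past it. Per the LLVM language reference, a shift by an amount equal to or larger than the bit width has an undefined result, so line 28 in each file is really testing what the backend chooses to emit for that case. Two lines from separate test functions:

    %shift = ashr i32 %a, 31   ; defined: broadcasts the sign bit, result is 0 or -1
    %shift = ashr i32 %a, 32   ; undefined result in LLVM IR; the test pins the lowering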
|
D | vec-shift-05.ll |
     14  %ret = ashr <16 x i8> %val1, %val2
     23  %ret = ashr <16 x i8> %val, <i8 1, i8 1, i8 1, i8 1,
     35  %ret = ashr <16 x i8> %val, <i8 7, i8 7, i8 7, i8 7,
     51  %ret = ashr <8 x i16> %val1, %val2
     60  %ret = ashr <8 x i16> %val, <i16 1, i16 1, i16 1, i16 1,
     70  %ret = ashr <8 x i16> %val, <i16 15, i16 15, i16 15, i16 15,
     83  %ret = ashr <4 x i32> %val1, %val2
     92  %ret = ashr <4 x i32> %val, <i32 1, i32 1, i32 1, i32 1>
    101  %ret = ashr <4 x i32> %val, <i32 31, i32 31, i32 31, i32 31>
    114  %ret = ashr <2 x i64> %val1, %val2
    [all …]
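One detail worth noting: the shift-by-(width minus one) cases broadcast each lane's sign bit, so every element of the result is either 0 or -1. For the untruncated <4 x i32> constant at line 101:

    %ret = ashr <4 x i32> %val, <i32 31, i32 31, i32 31, i32 31>   ; each lane becomes 0 or -1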
|
/external/llvm/test/CodeGen/XCore/ |
D | ashr.ll |
      2  define i32 @ashr(i32 %a, i32 %b) nounwind {
      3  %1 = ashr i32 %a, %b
      6  ; CHECK-LABEL: ashr:
      7  ; CHECK-NEXT: ashr r0, r0, r1
     10  %1 = ashr i32 %a, 24
     14  ; CHECK-NEXT: ashr r0, r0, 24
     17  %1 = ashr i32 %a, 31
     21  ; CHECK-NEXT: ashr r0, r0, 32
     32  ; CHECK-NEXT: ashr r0, r0, 32
     44  ; CHECK-NEXT: ashr r0, r0, 32
     [all …]
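Pieced together, the first XCore case is a direct one-to-one mapping from the IR shift to the target's own ashr instruction (the ret is inferred; everything else is quoted in the matches above):

    define i32 @ashr(i32 %a, i32 %b) nounwind {
      %1 = ashr i32 %a, %b
      ret i32 %1
    }
    ; CHECK-LABEL: ashr:
    ; CHECK-NEXT: ashr r0, r0, r1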
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | sext-in-reg.ll |
     19  %sext = ashr i32 %shl, 31
     37  %ashr = ashr i32 %shl, 24
     38  store i32 %ashr, i32 addrspace(1)* %out, align 4
     55  %ashr = ashr i32 %shl, 16
     56  store i32 %ashr, i32 addrspace(1)* %out, align 4
     73  %ashr = ashr <1 x i32> %shl, <i32 24>
     74  store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
     87  %ashr = ashr i64 %shl, 63
     88  store i64 %ashr, i64 addrspace(1)* %out, align 8
    101  %ashr = ashr i64 %shl, 56
    [all …]
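Line 87 is the degenerate end of the same sext-in-reg idiom seen in the NVPTX file above: shifting an i64 left and right by 63 sign-extends a single bit. Assuming the elided %shl is the matching left shift (%in is a hypothetical name):

    %shl = shl i64 %in, 63
    %ashr = ashr i64 %shl, 63   ; sign-extend i1: 0 if bit 0 of %in is clear, -1 if set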
|
D | sdivrem24.ll |
     59  %num.i24 = ashr i32 %num.i24.0, 8
     60  %den.i24 = ashr i32 %den.i24.0, 8
     78  %num.i24 = ashr i32 %num.i24.0, 7
     79  %den.i24 = ashr i32 %den.i24.0, 7
     97  %num.i24 = ashr i32 %num.i24.0, 8
     98  %den.i24 = ashr i32 %den.i24.0, 7
    116  %num.i24 = ashr i32 %num.i24.0, 7
    117  %den.i24 = ashr i32 %den.i24.0, 8
    177  %num.i24 = ashr i32 %num.i24.0, 8
    178  %den.i24 = ashr i32 %den.i24.0, 8
    [all …]
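The shl/ashr pairs (the shl halves are elided above) produce operands that are provably sign-extended from 24 bits, which is what lets the AMDGPU backend route the division through its fast 24-bit path; the shift-by-7 variants only prove 25 bits and presumably check that the fold is not applied. A sketch of the numerator side, assuming the elided shl (%num is a hypothetical name):

    %num.i24.0 = shl i32 %num, 8
    %num.i24 = ashr i32 %num.i24.0, 8   ; %num, now provably sign-extended from 24 bits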
|
/external/llvm/test/Transforms/InstSimplify/ |
D | shr-nop.ll |
     12  %t = ashr i32 %n, 17
     29  %shr = ashr exact i8 0, %a
     38  %shr = ashr i8 0, %a
     56  %shr = ashr exact i8 0, %a
     74  %shr = ashr i8 0, %a
     92  %shr = ashr exact i8 -128, %a
    119  %shr = ashr exact i8 -128, %a
    137  %shr = ashr i8 -128, %a
    155  %shr = ashr i8 -128, %a
    173  %shr = ashr i8 0, %a
    [all …]
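True to the file name, these shifts fold away: an arithmetic shift of 0 is 0 for any shift amount, and -128 (i8 bit pattern 0x80) stays negative under any in-range ashr. The surrounding icmps, elided from the matches, presumably simplify to constants, e.g.:

    %shr = ashr i8 0, %a        ; always 0
    %cmp = icmp eq i8 %shr, 0   ; assumed context: InstSimplify folds this to true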
|
/external/llvm/test/CodeGen/PowerPC/ |
D | fast-isel-shifter.ll |
     35  define i32 @ashr() nounwind {
     37  ; ELF64: ashr
     39  %ashr = ashr i32 -1, 2
     40  ret i32 %ashr
     47  %ashr = ashr i32 %src1, %src2
     48  ret i32 %ashr
|
/external/llvm/test/CodeGen/ARM/ |
D | fast-isel-shifter.ll |
     36  define i32 @ashr() nounwind ssp {
     38  ; ARM: ashr
     40  %ashr = ashr i32 -1, 2
     41  ret i32 %ashr
     48  %ashr = ashr i32 %src1, %src2
     49  ret i32 %ashr
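The PowerPC and ARM fast-isel tests are near-twins, and their constant case is worth a second look: an arithmetic shift of -1 leaves the value unchanged, since every vacated bit is refilled with the sign bit:

    %ashr = ashr i32 -1, 2   ; still -1: all-ones shifted arithmetically stays all-ones
    ret i32 %ashr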
|
D | smul.ll |
     14  %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
     22  %tmp1 = ashr i32 %x, 16 ; <i32> [#uses=1]
     23  %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
     32  %tmp2 = ashr i32 %y, 16 ; <i32> [#uses=1]
     41  %tmp1 = ashr i32 %x, 16
     42  %tmp3 = ashr i32 %y, 16
     62  %tmp3 = ashr i32 %y, 16
     72  %shr = ashr exact i32 %shl, 16
     98  %shr = ashr i32 %b, 16
    111  %shr = ashr exact i32 %shl, 16
    [all …]
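The paired ashr-by-16 operations expose the signed high halves of both operands, the shape the ARM backend can match to its halfword multiply instructions such as SMULTT. A sketch around lines 41-42 (the mul is an assumption, not among the matches):

    %tmp1 = ashr i32 %x, 16
    %tmp3 = ashr i32 %y, 16
    %tmp4 = mul i32 %tmp1, %tmp3   ; assumed use: selectable as SMULTT (top half x top half)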
|
/external/llvm/test/Transforms/InstCombine/ |
D | icmp-shr.ll |
     16  %shr = ashr i8 127, %a
     32  %shr = ashr i8 127, %a
     40  %shr = ashr i8 128, %a
     48  %shr = ashr i8 128, %a
     72  %shr = ashr exact i8 128, %a
     80  %shr = ashr exact i8 128, %a
    136  %shr = ashr exact i8 -128, %a
    144  %shr = ashr exact i8 -128, %a
    168  %shr = ashr i8 -128, %a
    176  %shr = ashr i8 -128, %a
    [all …]
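Note that i8 128 and i8 -128 are the same bit pattern (0x80), so the matches cover one value spelled two ways. Any in-range ashr keeps it negative, which is what lets the comparisons on the elided lines fold to constants:

    %shr = ashr i8 -128, %a      ; in [-128, -1] for any %a in [0, 7]
    %cmp = icmp slt i8 %shr, 0   ; assumed context: InstCombine folds this to true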
|
D | shift-sra.ll |
      7  %Y = ashr i32 %X, %shift.upgrd.1 ; <i32> [#uses=1]
     17  %tmp5 = ashr i32 %tmp4, 3 ; <i32> [#uses=1]
     29  %Y2 = ashr i64 %Y, 63
     33  %S = ashr i64 %P, 12
     47  %Y2 = ashr i64 %Y, 63
     52  %S = ashr i64 %R, 12
     71  %S = ashr i32 %P, 16
     75  ; CHECK-NEXT: ashr i32 %P, 16
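shift-sra.ll exercises InstCombine's handling of sra chains; one relevant fold is that consecutive arithmetic shifts merge into a single shift by the summed amount, clamped to the bit width minus one. A sketch with hypothetical names:

    %a = ashr i32 %x, 10
    %b = ashr i32 %a, 12   ; combined by InstCombine into: ashr i32 %x, 22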
|
D | signext.ll |
     12  ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
     23  ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
     44  ; CHECK: %tmp.3 = ashr exact i32 %sext, 24
     50  %tmp.4 = ashr i32 %tmp.2, 16 ; <i32> [#uses=1]
     54  ; CHECK: %tmp.4 = ashr exact i32 %tmp.2, 16
     61  %tmp.5 = ashr i32 %sext1, 16 ; <i32> [#uses=1]
     75  ; CHECK: %sub = ashr i32 %x, 5
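The recurring CHECK pattern documents a canonicalization: when the preceding shl guarantees the low bits are zero, InstCombine marks the matching ashr as exact. Spelled out for the 16-bit case (the shl is assumed, matching the %sext name above):

    %sext = shl i32 %x, 16
    %tmp.3 = ashr exact i32 %sext, 16   ; 'exact' is legal: the shl left bits 0..15 zero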
|
/external/llvm/test/CodeGen/AArch64/ |
D | fast-isel-shift.ll |
      7  %2 = ashr i16 %1, 1
     16  %2 = ashr i16 %1, 1
     24  %2 = ashr i32 %1, 1
     32  %2 = ashr i32 %1, 1
     40  %2 = ashr i64 %1, 1
     48  %2 = ashr i64 %1, 1
    401  %1 = ashr i8 %a, %b
    408  %1 = ashr i8 %a, 4
    416  %2 = ashr i16 %1, 4
    424  %2 = ashr i16 %1, 4
    [all …]
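AArch64 has no 8- or 16-bit registers, so a narrow ashr needs its operand sign-extended into a 32-bit register first; presumably the elided context around line 401 pairs the shift with such an extension. A sketch (the instructions in the comment are an expected lowering, not quoted CHECK lines):

    %1 = ashr i8 %a, %b   ; expected lowering: sxtb w8, w0 followed by asr w0, w8, w1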
|