/external/llvm/test/CodeGen/X86/ |
D | narrow-shl-cst.ll |
      5  %and = shl i32 %x, 10
      6  %shl = and i32 %and, 31744
      7  ret i32 %shl
     14  %or = shl i32 %x, 10
     15  %shl = or i32 %or, 31744
     16  ret i32 %shl
     23  %xor = shl i32 %x, 10
     24  %shl = xor i32 %xor, 31744
     25  ret i32 %shl
     32  %and = shl i64 %x, 40
         [all …]
|
D | dagcombine-shifts.ll |
      3  ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
      5  ; Canolicalize the sequence shl/zext/lshr performing the zeroextend
     16  %shl = shl i16 %ext, 4
     17  ret i16 %shl
     23  ; CHECK-NOT: shl
     30  %shl = shl i32 %ext, 4
     31  ret i32 %shl
     37  ; CHECK-NOT: shl
     44  %shl = shl i32 %ext, 4
     45  ret i32 %shl
         [all …]
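The fold quoted from line 3 of that file moves a left shift across a zero-extension so that both shifts happen in the narrow type. A minimal sketch of the input shape (the function name below is made up for illustration, not taken from the file):

    define i32 @fun_sketch(i16 %A) {
      %shr = lshr i16 %A, 4          ; drop the low 4 bits
      %ext = zext i16 %shr to i32    ; widen to 32 bits
      %shl = shl i32 %ext, 4         ; shift back up in the wide type
      ret i32 %shl
    }

After the combine the shl is performed in i16 before the zext, so the lshr/shl pair collapses into an and that clears the low four bits, which is why the CHECK-NOT: shl lines above can pass.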
|
D | shift-bmi2.ll |
      6  %shl = shl i32 %x, %shamt
     13  ret i32 %shl
     18  %shl = shl i32 %x, 5
     25  ret i32 %shl
     31  %shl = shl i32 %x, %shamt
     38  ret i32 %shl
     44  %shl = shl i32 %x, 5
     51  ret i32 %shl
     56  %shl = shl i64 %x, %shamt
     60  ret i64 %shl
         [all …]
|
D | sse2-vector-shifts.ll |
     11  %shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
     12  ret <8 x i16> %shl
     21  %shl = shl <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
     22  ret <8 x i16> %shl
     31  %shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
     32  ret <8 x i16> %shl
     40  %shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
     41  ret <4 x i32> %shl
     50  %shl = shl <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
     51  ret <4 x i32> %shl
         [all …]
|
D | shift-combine-crash.ll |
     15  %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 1, i64 2>
     16  ret <4 x i64> %shl
     23  %shl = shl <4 x i64> %A, <i64 2, i64 3, i64 undef, i64 undef>
     24  ret <4 x i64> %shl
     28  %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 3, i64 undef>
     29  ret <4 x i64> %shl
     33  %shl = shl <4 x i64> %A, <i64 undef, i64 2, i64 undef, i64 3>
     34  ret <4 x i64> %shl
     38  %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 undef, i64 undef>
     39  ret <4 x i64> %shl
         [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | narrow-shl-cst.ll |
      5  %and = shl i32 %x, 10
      6  %shl = and i32 %and, 31744
      7  ret i32 %shl
     14  %or = shl i32 %x, 10
     15  %shl = or i32 %or, 31744
     16  ret i32 %shl
     23  %xor = shl i32 %x, 10
     24  %shl = xor i32 %xor, 31744
     25  ret i32 %shl
     32  %and = shl i64 %x, 40
         [all …]
|
D | dagcombine-shifts.ll |
      3  ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
      5  ; Canolicalize the sequence shl/zext/lshr performing the zeroextend
     16  %shl = shl i16 %ext, 4
     17  ret i16 %shl
     23  ; CHECK-NOT: shl
     30  %shl = shl i32 %ext, 4
     31  ret i32 %shl
     37  ; CHECK-NOT: shl
     44  %shl = shl i32 %ext, 4
     45  ret i32 %shl
         [all …]
|
D | sse2-vector-shifts.ll |
     11  %shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
     12  ret <8 x i16> %shl
     21  %shl = shl <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
     22  ret <8 x i16> %shl
     31  %shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
     32  ret <8 x i16> %shl
     40  %shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
     41  ret <4 x i32> %shl
     50  %shl = shl <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
     51  ret <4 x i32> %shl
         [all …]
|
D | shift-combine-crash.ll |
     15  %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 1, i64 2>
     16  ret <4 x i64> %shl
     23  %shl = shl <4 x i64> %A, <i64 2, i64 3, i64 undef, i64 undef>
     24  ret <4 x i64> %shl
     28  %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 3, i64 undef>
     29  ret <4 x i64> %shl
     33  %shl = shl <4 x i64> %A, <i64 undef, i64 2, i64 undef, i64 3>
     34  ret <4 x i64> %shl
     38  %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 undef, i64 undef>
     39  ret <4 x i64> %shl
         [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | icmp-shl-nsw.ll |
      4  ; If the (shl x, C) preserved the sign and this is a sign test,
     12  %shl = shl nsw i32 %x, 21
     13  %cmp = icmp sgt i32 %shl, 0
     22  %shl = shl nsw i32 %x, 21
     23  %cmp = icmp sge i32 %shl, 0
     32  %shl = shl nsw i32 %x, 21
     33  %cmp = icmp sge i32 %shl, 1
     42  %shl = shl nsw <2 x i32> %x, <i32 21, i32 21>
     43  %cmp = icmp sge <2 x i32> %shl, <i32 1, i32 1>
     47  ; Checks for icmp (eq|ne) (shl x, C), 0
         [all …]
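The comment on line 4 is the heart of this test: a shl tagged nsw cannot change the sign of its operand, so a sign test of the shifted value can be rewritten as a sign test of the original value. A minimal sketch of the scalar case, with a hypothetical function name:

    define i1 @shl_nsw_sgt_sketch(i32 %x) {
      %shl = shl nsw i32 %x, 21
      %cmp = icmp sgt i32 %shl, 0   ; expected to simplify to: icmp sgt i32 %x, 0
      ret i1 %cmp
    }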
|
D | div-shift.ll |
     14  %s = shl i32 2, %y
     29  %s = shl <2 x i32> <i32 2, i32 2>, %y
     41  %1 = shl i32 1, %y
     55  %1 = shl i32 4, %y
     68  %1 = shl i32 1, %y
     82  %1 = shl i32 1, %V
    108  %shl = shl nsw i32 %x, 2
    109  %r = sdiv i32 %shl, %x
    117  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], 2
    121  %shl = shl i32 %x, 2
         [all …]
|
D | set-lowbits-mask-canonicalize.ll |
     16  ; No no-wrap tags on shl
     20  ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
     24  %setbit = shl i32 1, %NBits
     31  ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
     35  %setbit = shl i32 1, %NBits
     44  %setbit = shl i32 1, %NBits
     53  %setbit = shl i32 1, %NBits
     58  ; shl is nsw
     62  ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
     66  %setbit = shl nsw i32 1, %NBits
         [all …]
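The %setbit/CHECK pairs above are variants of one pattern: building a mask of the low %NBits bits as (1 << %NBits) - 1, which InstCombine canonicalizes to ~(-1 << %NBits), matching the shl nsw i32 -1 in the NOTMASK CHECK lines. A minimal sketch of the input shape, with a hypothetical function name:

    define i32 @set_lowbits_sketch(i32 %NBits) {
      %setbit = shl i32 1, %NBits     ; 1 << NBits
      %mask   = add i32 %setbit, -1   ; (1 << NBits) - 1, the low-bits mask
      ret i32 %mask
    }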
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | narrow-shl-cst.ll |
      5  %and = shl i32 %x, 10
      6  %shl = and i32 %and, 31744
      7  ret i32 %shl
     14  %or = shl i32 %x, 10
     15  %shl = or i32 %or, 31744
     16  ret i32 %shl
     23  %xor = shl i32 %x, 10
     24  %shl = xor i32 %xor, 31744
     25  ret i32 %shl
     32  %and = shl i64 %x, 40
         [all …]
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | testshiftshl.ll |
      8  ; SSE2: cost of 4 {{.*}} shl
     12  %0 = shl %shifttype %a , %b
     20  ; SSE2: cost of 10 {{.*}} shl
     24  %0 = shl %shifttype4i16 %a , %b
     32  ; SSE2: cost of 32 {{.*}} shl
     36  %0 = shl %shifttype8i16 %a , %b
     44  ; SSE2: cost of 64 {{.*}} shl
     48  %0 = shl %shifttype16i16 %a , %b
     56  ; SSE2: cost of 128 {{.*}} shl
     60  %0 = shl %shifttype32i16 %a , %b
         [all …]
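Each "cost of N" CHECK line pins the cost-model estimate for one vector shl; the numbers climb with vector width because plain SSE2 has no per-element variable shift for i16 lanes, so wider vectors are largely scalarized. A sketch of the shape of one such case, with hypothetical names (the file's own %shifttype* aliases are not reproduced here):

    define <8 x i16> @shift8i16_sketch(<8 x i16> %a, <8 x i16> %b) {
      ; variable per-element i16 shift: expensive under SSE2
      %0 = shl <8 x i16> %a, %b
      ret <8 x i16> %0
    }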
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/ |
D | testshiftshl.ll |
      8  ; SSE2: cost of 4 {{.*}} shl
     12  %0 = shl %shifttype %a , %b
     20  ; SSE2: cost of 10 {{.*}} shl
     24  %0 = shl %shifttype4i16 %a , %b
     32  ; SSE2: cost of 32 {{.*}} shl
     36  %0 = shl %shifttype8i16 %a , %b
     44  ; SSE2: cost of 64 {{.*}} shl
     48  %0 = shl %shifttype16i16 %a , %b
     56  ; SSE2: cost of 128 {{.*}} shl
     60  %0 = shl %shifttype32i16 %a , %b
         [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | xbfiz.ll |
      6  %shl = shl i64 %v, 48
      7  %shr = ashr i64 %shl, 47
     14  %shl = shl i32 %v, 18
     15  %shr = ashr i32 %shl, 17
     22  %shl = shl i64 %v, 53
     23  %shr = lshr i64 %shl, 17
     30  %shl = shl i32 %v, 8
     31  %shr = lshr i32 %shl, 2
     38  %shl = shl i64 %v, 36
     39  %and = and i64 %shl, 140668768878592
         [all …]
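Each quoted pair is a shift idiom that AArch64 can match as a single bitfield move. For instance, the shl 48 / ashr 47 pair on lines 6-7 sign-extends the low 16 bits of %v and leaves them shifted up by one, which fits an sbfiz (signed bitfield insert in zero). A self-contained sketch with a hypothetical name:

    define i64 @sbfiz_sketch(i64 %v) {
      %shl = shl i64 %v, 48     ; move the low 16 bits to the top
      %shr = ashr i64 %shl, 47  ; sign-extend them; net left shift of 1
      ret i64 %shr              ; expected to lower to something like: sbfiz x0, x0, #1, #16
    }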
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | xbfiz.ll |
      6  %shl = shl i64 %v, 48
      7  %shr = ashr i64 %shl, 47
     14  %shl = shl i32 %v, 18
     15  %shr = ashr i32 %shl, 17
     22  %shl = shl i64 %v, 53
     23  %shr = lshr i64 %shl, 17
     30  %shl = shl i32 %v, 8
     31  %shr = lshr i32 %shl, 2
     38  %shl = shl i64 %v, 36
     39  %and = and i64 %shl, 140668768878592
         [all …]
|
D | dag-combine-mul-shl.ll |
     10  …%shl = shl <16 x i8> %arg, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
     11  …%mul = mul <16 x i8> %shl, <i8 0, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0…
     23  …%shl = shl <16 x i8> %mul, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
     24  ret <16 x i8> %shl
     34  …%shl = shl <16 x i8> %arg, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
     35  …%mul = mul <16 x i8> %shl, <i8 undef, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, …
     47  …%shl = shl <16 x i8> %mul, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
     48  ret <16 x i8> %shl
     57  %shl = shl i32 %arg, 7
     58  %mul = mul i32 %shl, 13
         [all …]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | shift-i64-opts.ll |
     13  %shl = lshr i64 %val, 35
     14  store i64 %shl, i64 addrspace(1)* %out
     25  %shl = lshr i64 %val, 63
     26  store i64 %shl, i64 addrspace(1)* %out
     37  %shl = lshr i64 %val, 33
     38  store i64 %shl, i64 addrspace(1)* %out
     48  %shl = lshr i64 %val, 32
     49  store i64 %shl, i64 addrspace(1)* %out
     64  %shl = lshr i64 %and, 40
     65  store i64 %shl, i64 addrspace(1)* %out
         [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Mips/ |
D | cins.ll |
      5  %shl = shl i32 %n, 5
      6  %conv = zext i32 %shl to i64
     16  %and = shl i64 %n, 8
     17  %shl = and i64 %and, 16776960
     18  ret i64 %shl
     27  %and = shl i64 %n, 38
     28  %shl = and i64 %and, 18014123631575040
     29  ret i64 %shl
     38  %0 = shl i16 %n, 2
     49  %0 = shl i8 %n, 2
         [all …]
|
D | mips64shift.ll |
      6  %shl = shl i64 %a0, %a1
      7  ret i64 %shl
     27  %shl = shl i64 %a0, 10
     28  ret i64 %shl
     48  %shl = shl i64 %a0, 40
     49  ret i64 %shl
     72  %shl = shl i64 %a0, %sub
     73  %or = or i64 %shl, %shr
     81  %shl = shl i64 %a0, %a1
     84  %or = or i64 %shr, %shl
         [all …]
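The shl-by-%sub lines followed by an or with a matching lshr are the usual rotate-by-variable idiom. A minimal sketch of that shape, with a hypothetical name; on MIPS64r2 the backend is expected to fold the three operations into a single rotate instruction:

    define i64 @rot64_sketch(i64 %a0, i64 %a1) {
      %shr = lshr i64 %a0, %a1    ; low half of the rotate
      %sub = sub i64 64, %a1
      %shl = shl i64 %a0, %sub    ; high half of the rotate
      %or  = or i64 %shl, %shr    ; %a0 rotated right by %a1
      ret i64 %or
    }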
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIRegisterInfo.td |
    134  (add (decimate (shl SGPR_32, 1), 2))]>;
    139  (add (decimate (shl SGPR_32, 1), 4)),
    140  (add (decimate (shl SGPR_32, 2), 4)),
    141  (add (decimate (shl SGPR_32, 3), 4))]>;
    146  (add (decimate (shl SGPR_32, 1), 4)),
    147  (add (decimate (shl SGPR_32, 2), 4)),
    148  (add (decimate (shl SGPR_32, 3), 4)),
    149  (add (decimate (shl SGPR_32, 4), 4)),
    150  (add (decimate (shl SGPR_32, 5), 4)),
    151  (add (decimate (shl SGPR_32, 6), 4)),
         [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | shift-i64-opts.ll |
     13  %shl = lshr i64 %val, 35
     14  store i64 %shl, i64 addrspace(1)* %out
     25  %shl = lshr i64 %val, 63
     26  store i64 %shl, i64 addrspace(1)* %out
     37  %shl = lshr i64 %val, 33
     38  store i64 %shl, i64 addrspace(1)* %out
     48  %shl = lshr i64 %val, 32
     49  store i64 %shl, i64 addrspace(1)* %out
     64  %shl = lshr i64 %and, 40
     65  store i64 %shl, i64 addrspace(1)* %out
         [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/Mips/ |
D | mips64shift.ll |
      6  %shl = shl i64 %a0, %a1
      7  ret i64 %shl
     27  %shl = shl i64 %a0, 10
     28  ret i64 %shl
     48  %shl = shl i64 %a0, 40
     49  ret i64 %shl
     71  %shl = shl i64 %a0, %sub
     72  %or = or i64 %shl, %shr
     79  %shl = shl i64 %a0, %a1
     82  %or = or i64 %shr, %shl
         [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | shift-cmp.ll |
      6  %shl = shl i32 1, %y
      7  %and = and i32 %x, %shl
      8  %cmp = icmp eq i32 %and, %shl
     18  %shl = shl i64 1, %y
     19  %and = and i64 %x, %shl
     20  %cmp = icmp eq i64 %and, %shl
     30  %shl = shl i32 1, %y
     31  %and = and i32 %x, %shl
     32  %cmp = icmp ne i32 %and, %shl
     43  %shl = shl i64 1, %y
         [all …]
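Every function in this test is the same single-bit membership test: (%x & (1 << %y)) compared against (1 << %y), which is true exactly when bit %y of %x is set. A minimal sketch with a hypothetical name; the point of the test is that the backend can check the bit directly (for example by shifting %x right by %y and testing the low bit) instead of materializing the mask and comparing:

    define i1 @bit_test_sketch(i32 %x, i32 %y) {
      %shl = shl i32 1, %y          ; single-bit mask
      %and = and i32 %x, %shl
      %cmp = icmp eq i32 %and, %shl ; true iff bit %y of %x is set
      ret i1 %cmp
    }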
|