/external/llvm/test/CodeGen/X86/

narrow-shl-cst.ll
   5: %and = shl i32 %x, 10
   6: %shl = and i32 %and, 31744
   7: ret i32 %shl
  14: %or = shl i32 %x, 10
  15: %shl = or i32 %or, 31744
  16: ret i32 %shl
  23: %xor = shl i32 %x, 10
  24: %shl = xor i32 %xor, 31744
  25: ret i32 %shl
  32: %and = shl i64 %x, 40
  [all …]
dagcombine-shifts.ll
   3: ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
   5: ; Canonicalize the sequence shl/zext/lshr performing the zero-extend
  16: %shl = shl i16 %ext, 4
  17: ret i16 %shl
  23: ; CHECK-NOT: shl
  30: %shl = shl i32 %ext, 4
  31: ret i32 %shl
  37: ; CHECK-NOT: shl
  44: %shl = shl i32 %ext, 4
  45: ret i32 %shl
  [all …]
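A concrete before/after pair makes the fold at line 3 easier to read. The following is a minimal sketch in LLVM IR using the same X = 4 as the test's i16 case; the function names are illustrative, not taken from the file. Because the lshr has already zeroed the low X bits, the shl can be performed in the narrow type and the zext hoisted outward without changing the result:

  ; Before: shl of a zext of an lshr, both shifts by X = 4 (hypothetical example).
  define i32 @fold_before(i16 %a) {
    %shr = lshr i16 %a, 4            ; low 4 bits of %shr are known zero
    %ext = zext i16 %shr to i32
    %shl = shl i32 %ext, 4           ; (shl (zext (lshr %a, 4)), 4)
    ret i32 %shl
  }

  ; After: the shl happens in i16 and the zero-extend moves outside.
  define i32 @fold_after(i16 %a) {
    %shr = lshr i16 %a, 4
    %shl = shl i16 %shr, 4           ; same effect as masking: %a & 0xFFF0
    %ext = zext i16 %shl to i32
    ret i32 %ext
  }

Both functions compute the same value; the canonical form keeps the lshr/shl pair in one type, which is what lets later combines collapse them into a mask and why the CHECK-NOT: shl lines expect no shift to survive.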
shift-bmi2.ll
   6: %shl = shl i32 %x, %shamt
  13: ret i32 %shl
  18: %shl = shl i32 %x, 5
  25: ret i32 %shl
  31: %shl = shl i32 %x, %shamt
  38: ret i32 %shl
  44: %shl = shl i32 %x, 5
  51: ret i32 %shl
  56: %shl = shl i64 %x, %shamt
  60: ret i64 %shl
  [all …]

shift-combine-crash.ll
  15: %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 1, i64 2>
  16: ret <4 x i64> %shl
  23: %shl = shl <4 x i64> %A, <i64 2, i64 3, i64 undef, i64 undef>
  24: ret <4 x i64> %shl
  28: %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 3, i64 undef>
  29: ret <4 x i64> %shl
  33: %shl = shl <4 x i64> %A, <i64 undef, i64 2, i64 undef, i64 3>
  34: ret <4 x i64> %shl
  38: %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 undef, i64 undef>
  39: ret <4 x i64> %shl
  [all …]

sse2-vector-shifts.ll
  11: %shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
  12: ret <8 x i16> %shl
  21: %shl = shl <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  22: ret <8 x i16> %shl
  31: %shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  32: ret <8 x i16> %shl
  40: %shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
  41: ret <4 x i32> %shl
  50: %shl = shl <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
  51: ret <4 x i32> %shl
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

narrow-shl-cst.ll
   5: %and = shl i32 %x, 10
   6: %shl = and i32 %and, 31744
   7: ret i32 %shl
  14: %or = shl i32 %x, 10
  15: %shl = or i32 %or, 31744
  16: ret i32 %shl
  23: %xor = shl i32 %x, 10
  24: %shl = xor i32 %xor, 31744
  25: ret i32 %shl
  32: %and = shl i64 %x, 40
  [all …]
dagcombine-shifts.ll
   3: ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
   5: ; Canonicalize the sequence shl/zext/lshr performing the zero-extend
  16: %shl = shl i16 %ext, 4
  17: ret i16 %shl
  23: ; CHECK-NOT: shl
  30: %shl = shl i32 %ext, 4
  31: ret i32 %shl
  37: ; CHECK-NOT: shl
  44: %shl = shl i32 %ext, 4
  45: ret i32 %shl
  [all …]
shift-combine-crash.ll
  15: %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 1, i64 2>
  16: ret <4 x i64> %shl
  23: %shl = shl <4 x i64> %A, <i64 2, i64 3, i64 undef, i64 undef>
  24: ret <4 x i64> %shl
  28: %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 3, i64 undef>
  29: ret <4 x i64> %shl
  33: %shl = shl <4 x i64> %A, <i64 undef, i64 2, i64 undef, i64 3>
  34: ret <4 x i64> %shl
  38: %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 undef, i64 undef>
  39: ret <4 x i64> %shl
  [all …]

sse2-vector-shifts.ll
  11: %shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
  12: ret <8 x i16> %shl
  21: %shl = shl <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  22: ret <8 x i16> %shl
  31: %shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  32: ret <8 x i16> %shl
  40: %shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
  41: ret <4 x i32> %shl
  50: %shl = shl <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
  51: ret <4 x i32> %shl
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/

icmp-shl-nsw.ll
   4: ; If the (shl x, C) preserved the sign and this is a sign test,
  12: %shl = shl nsw i32 %x, 21
  13: %cmp = icmp sgt i32 %shl, 0
  22: %shl = shl nsw i32 %x, 21
  23: %cmp = icmp sge i32 %shl, 0
  32: %shl = shl nsw i32 %x, 21
  33: %cmp = icmp sge i32 %shl, 1
  42: %shl = shl nsw <2 x i32> %x, <i32 21, i32 21>
  43: %cmp = icmp sge <2 x i32> %shl, <i32 1, i32 1>
  47: ; Checks for icmp (eq|ne) (shl x, C), 0
  [all …]
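The comment at line 4 carries the whole argument: shl nsw cannot flip the sign bit, so a signed sign test of the shifted value is really a sign test of %x. A minimal sketch in LLVM IR of the reduction these tests exercise (illustrative function name, not from the file):

  define i1 @sign_test(i32 %x) {
    %shl = shl nsw i32 %x, 21        ; nsw: no signed wrap, so the sign is preserved
    %cmp = icmp sgt i32 %shl, 0      ; can fold to: icmp sgt i32 %x, 0
    ret i1 %cmp
  }

The sge-with-1 variants above hit the same logic once icmp sge %shl, 1 is first canonicalized to icmp sgt %shl, 0.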
div-shift.ll
   14: %s = shl i32 2, %y
   29: %s = shl <2 x i32> <i32 2, i32 2>, %y
   41: %1 = shl i32 1, %y
   55: %1 = shl i32 4, %y
   68: %1 = shl i32 1, %y
   82: %1 = shl i32 1, %V
  108: %shl = shl nsw i32 %x, 2
  109: %r = sdiv i32 %shl, %x
  117: ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], 2
  121: %shl = shl i32 %x, 2
  [all …]
/external/llvm/test/CodeGen/AArch64/

xbfiz.ll
   6: %shl = shl i64 %v, 48
   7: %shr = ashr i64 %shl, 47
  14: %shl = shl i32 %v, 18
  15: %shr = ashr i32 %shl, 17
  22: %shl = shl i64 %v, 53
  23: %shr = lshr i64 %shl, 17
  30: %shl = shl i32 %v, 8
  31: %shr = lshr i32 %shl, 2
  38: %shl = shl i64 %v, 36
  39: %and = and i64 %shl, 140668768878592
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/

xbfiz.ll
   6: %shl = shl i64 %v, 48
   7: %shr = ashr i64 %shl, 47
  14: %shl = shl i32 %v, 18
  15: %shr = ashr i32 %shl, 17
  22: %shl = shl i64 %v, 53
  23: %shr = lshr i64 %shl, 17
  30: %shl = shl i32 %v, 8
  31: %shr = lshr i32 %shl, 2
  38: %shl = shl i64 %v, 36
  39: %and = and i64 %shl, 140668768878592
  [all …]

dag-combine-mul-shl.ll
  10: …%shl = shl <16 x i8> %arg, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
  11: …%mul = mul <16 x i8> %shl, <i8 0, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0…
  23: …%shl = shl <16 x i8> %mul, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
  24: ret <16 x i8> %shl
  34: …%shl = shl <16 x i8> %arg, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
  35: …%mul = mul <16 x i8> %shl, <i8 undef, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, …
  47: …%shl = shl <16 x i8> %mul, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7…
  48: ret <16 x i8> %shl
  57: %shl = shl i32 %arg, 7
  58: %mul = mul i32 %shl, 13
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/

testshiftshl.ll
   8: ; SSE2: cost of 4 {{.*}} shl
  12: %0 = shl %shifttype %a , %b
  20: ; SSE2: cost of 10 {{.*}} shl
  24: %0 = shl %shifttype4i16 %a , %b
  32: ; SSE2: cost of 32 {{.*}} shl
  36: %0 = shl %shifttype8i16 %a , %b
  44: ; SSE2: cost of 64 {{.*}} shl
  48: %0 = shl %shifttype16i16 %a , %b
  56: ; SSE2: cost of 128 {{.*}} shl
  60: %0 = shl %shifttype32i16 %a , %b
  [all …]
/external/llvm/test/Analysis/CostModel/X86/

testshiftshl.ll
   8: ; SSE2: cost of 4 {{.*}} shl
  12: %0 = shl %shifttype %a , %b
  20: ; SSE2: cost of 10 {{.*}} shl
  24: %0 = shl %shifttype4i16 %a , %b
  32: ; SSE2: cost of 32 {{.*}} shl
  36: %0 = shl %shifttype8i16 %a , %b
  44: ; SSE2: cost of 64 {{.*}} shl
  48: %0 = shl %shifttype16i16 %a , %b
  56: ; SSE2: cost of 128 {{.*}} shl
  60: %0 = shl %shifttype32i16 %a , %b
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Mips/

cins.ll
   5: %shl = shl i32 %n, 5
   6: %conv = zext i32 %shl to i64
  16: %and = shl i64 %n, 8
  17: %shl = and i64 %and, 16776960
  18: ret i64 %shl
  27: %and = shl i64 %n, 38
  28: %shl = and i64 %and, 18014123631575040
  29: ret i64 %shl
  38: %0 = shl i16 %n, 2
  49: %0 = shl i8 %n, 2
  [all …]

mips64shift.ll
   6: %shl = shl i64 %a0, %a1
   7: ret i64 %shl
  27: %shl = shl i64 %a0, 10
  28: ret i64 %shl
  48: %shl = shl i64 %a0, 40
  49: ret i64 %shl
  72: %shl = shl i64 %a0, %sub
  73: %or = or i64 %shl, %shr
  81: %shl = shl i64 %a0, %a1
  84: %or = or i64 %shr, %shl
  [all …]
/external/llvm/test/CodeGen/AMDGPU/

shift-i64-opts.ll
  13: %shl = lshr i64 %val, 35
  14: store i64 %shl, i64 addrspace(1)* %out
  25: %shl = lshr i64 %val, 63
  26: store i64 %shl, i64 addrspace(1)* %out
  37: %shl = lshr i64 %val, 33
  38: store i64 %shl, i64 addrspace(1)* %out
  48: %shl = lshr i64 %val, 32
  49: store i64 %shl, i64 addrspace(1)* %out
  64: %shl = lshr i64 %and, 40
  65: store i64 %shl, i64 addrspace(1)* %out
  [all …]
/external/llvm/lib/Target/AMDGPU/

SIRegisterInfo.td
  134: (add (decimate (shl SGPR_32, 1), 2))]>;
  139: (add (decimate (shl SGPR_32, 1), 4)),
  140: (add (decimate (shl SGPR_32, 2), 4)),
  141: (add (decimate (shl SGPR_32, 3), 4))]>;
  146: (add (decimate (shl SGPR_32, 1), 4)),
  147: (add (decimate (shl SGPR_32, 2), 4)),
  148: (add (decimate (shl SGPR_32, 3), 4)),
  149: (add (decimate (shl SGPR_32, 4), 4)),
  150: (add (decimate (shl SGPR_32, 5), 4)),
  151: (add (decimate (shl SGPR_32, 6), 4)),
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/

shift-cmp.ll
   6: %shl = shl i32 1, %y
   7: %and = and i32 %x, %shl
   8: %cmp = icmp eq i32 %and, %shl
  18: %shl = shl i64 1, %y
  19: %and = and i64 %x, %shl
  20: %cmp = icmp eq i64 %and, %shl
  30: %shl = shl i32 1, %y
  31: %and = and i32 %x, %shl
  32: %cmp = icmp ne i32 %and, %shl
  43: %shl = shl i64 1, %y
  [all …]
/external/mesa3d/src/intel/tools/tests/gen7.5/

shl.asm
   1: shl(1) a0<1>UW a0<0,1,0>UW 0x0002UW { align1 WE_all 1N };
   2: shl(1) g12.2<1>UD g12.2<0,1,0>UD 0x0000000bUD { align1 WE_all 1N };
   3: shl(8) g19<1>D g18<8,8,1>D 0x00000002UD { align1 1Q };
   4: shl(16) g28<1>D g26<8,8,1>D 0x00000002UD { align1 1H };
   5: shl(8) g10<1>.xD g1<0>.yD 0x00000004UD { align16 1Q };
   6: shl(8) g21<1>.xyD g1<0>.xyyyD g1<0>.zwwwUD { align16 1Q };
   7: shl(8) g3<1>D g3<8,8,1>D g8<8,8,1>UD { align1 1Q };
   8: shl(16) g18<1>D g4<8,8,1>D g9<8,8,1>UD { align1 1H };
   9: shl(1) a0<1>UD g18<0,1,0>UD 0x00000008UD { align1 WE_all 1N };
  10: shl(8) g4<1>.xUD g17<4>.xUD g18<4>.xUD { align16 1Q };
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/

shift-i64-opts.ll
  13: %shl = lshr i64 %val, 35
  14: store i64 %shl, i64 addrspace(1)* %out
  25: %shl = lshr i64 %val, 63
  26: store i64 %shl, i64 addrspace(1)* %out
  37: %shl = lshr i64 %val, 33
  38: store i64 %shl, i64 addrspace(1)* %out
  48: %shl = lshr i64 %val, 32
  49: store i64 %shl, i64 addrspace(1)* %out
  64: %shl = lshr i64 %and, 40
  65: store i64 %shl, i64 addrspace(1)* %out
  [all …]
/external/llvm/test/CodeGen/Mips/

mips64shift.ll
   7: %shl = shl i64 %a0, %a1
   8: ret i64 %shl
  28: %shl = shl i64 %a0, 10
  29: ret i64 %shl
  49: %shl = shl i64 %a0, 40
  50: ret i64 %shl
  73: %shl = shl i64 %a0, %sub
  74: %or = or i64 %shl, %shr
  82: %shl = shl i64 %a0, %a1
  85: %or = or i64 %shr, %shl
  [all …]
/external/mesa3d/src/intel/tools/tests/gen6/

shl.asm
   1: shl(8) g25<1>.xD g21<4>.xD 0x00000004UD { align16 1Q };
   2: shl(8) g3<1>D g2.4<0,1,0>D 0x00000004UD { align1 1Q };
   3: shl(16) g3<1>D g2.4<0,1,0>D 0x00000004UD { align1 1H };
   4: shl(8) g11<1>D g11<4>D 16D { align16 1Q };
   5: shl(1) g28<1>UD g28<0,1,0>UD 0x00000010UD { align1 1N };
   6: shl(8) g64<1>.xUD g64<4>.xUD 0x00000010UD { align16 1Q };
   7: shl(8) m17<1>D g2<0,1,0>D 0x00000004UD { align1 1Q };
   8: shl(16) m17<1>D g2<0,1,0>D 0x00000004UD { align1 1H };
   9: shl(8) g2<1>D g2<8,8,1>D 16D { align1 1Q };
  10: shl(16) g2<1>D g2<8,8,1>D 16D { align1 1H };
  [all …]