
Searched for refs:ashr (results 1–25 of 1229), sorted by relevance
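For context: in LLVM IR, ashr is the arithmetic shift-right instruction. The vacated high bits are filled with copies of the sign bit, and a shift amount equal to or larger than the bit width gives an undefined (poison) result. A minimal standalone example, with an illustrative function name:

    define i32 @sign_shift(i32 %x) {
      ; for %x = -16 this returns -1: the sign bit is replicated into the high bits
      %r = ashr i32 %x, 4
      ret i32 %r
    }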


/external/llvm/test/Analysis/CostModel/X86/
testshiftashr.ll
8 ; SSE2: cost of 12 {{.*}} ashr
12 %0 = ashr %shifttype %a , %b
20 ; SSE2: cost of 16 {{.*}} ashr
24 %0 = ashr %shifttype4i16 %a , %b
32 ; SSE2: cost of 32 {{.*}} ashr
36 %0 = ashr %shifttype8i16 %a , %b
44 ; SSE2: cost of 64 {{.*}} ashr
48 %0 = ashr %shifttype16i16 %a , %b
56 ; SSE2: cost of 128 {{.*}} ashr
60 %0 = ashr %shifttype32i16 %a , %b
[all …]
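The testshiftashr.ll hits pair a FileCheck expectation against the cost the X86 cost model reports for each vector ashr; in trees of this vintage the file is driven by opt -cost-model -analyze. A hedged sketch of one such pairing, with the vector type written out since the %shifttype* type aliases are truncated out of the hits above:

    define <8 x i16> @shift8i16(<8 x i16> %a, <8 x i16> %b) {
    entry:
      ; SSE2: cost of 32 {{.*}} ashr
      %0 = ashr <8 x i16> %a, %b
      ret <8 x i16> %0
    }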
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/
testshiftashr.ll
8 ; SSE2: cost of 12 {{.*}} ashr
12 %0 = ashr %shifttype %a , %b
20 ; SSE2: cost of 16 {{.*}} ashr
24 %0 = ashr %shifttype4i16 %a , %b
32 ; SSE2: cost of 32 {{.*}} ashr
36 %0 = ashr %shifttype8i16 %a , %b
44 ; SSE2: cost of 64 {{.*}} ashr
48 %0 = ashr %shifttype16i16 %a , %b
56 ; SSE2: cost of 128 {{.*}} ashr
60 %0 = ashr %shifttype32i16 %a , %b
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
shift-sra.ll
14 %Y = ashr i32 %X, %shift.upgrd.1
28 %tmp5 = ashr i32 %tmp4, 3
39 ; CHECK-NEXT: [[Y2:%.*]] = ashr i64 %Y, 63
50 %Y2 = ashr i64 %Y, 63
54 %S = ashr i64 %P, 12
65 ; CHECK-NEXT: [[Y2:%.*]] = ashr i64 %Y, 63
76 %Y2 = ashr i64 %Y, 63
81 %S = ashr i64 %R, 12
97 ; CHECK-NEXT: [[S:%.*]] = ashr i32 [[P]], 16
111 %S = ashr i32 %P, 16
[all …]
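shift-sra.ll covers InstCombine's arithmetic-shift folds, e.g. that two ashrs in a row combine into one (the shift amounts add, clamped to bitwidth - 1). A minimal sketch of that fold:

    define i32 @two_sra(i32 %x) {
      ; InstCombine folds this pair to a single: ashr i32 %x, 12
      %a = ashr i32 %x, 5
      %b = ashr i32 %a, 7
      ret i32 %b
    }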
canonicalize-ashr-shl-to-masking.ll
22 %tmp0 = ashr i32 %x, %y
32 %tmp0 = ashr i32 %x, 5
39 ; CHECK-NEXT: [[TMP0:%.*]] = ashr i32 [[X:%.*]], 10
43 %tmp0 = ashr i32 %x, 10
54 %tmp0 = ashr i32 %x, 5
67 %tmp0 = ashr exact i32 %x, %y
76 %tmp0 = ashr exact i32 %x, 5
83 ; CHECK-NEXT: [[RET:%.*]] = ashr exact i32 [[X:%.*]], 5
86 %tmp0 = ashr exact i32 %x, 10
96 %tmp0 = ashr exact i32 %x, 5
[all …]
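As the file name says, shifting right arithmetically and then left by the same amount only clears the low bits, so InstCombine canonicalizes the pair to a mask. A minimal sketch:

    define i32 @ashr_shl_to_mask(i32 %x) {
      ; the pair keeps the high bits and zeroes the low 5 bits,
      ; so it canonicalizes to: and i32 %x, -32
      %t = ashr i32 %x, 5
      %r = shl i32 %t, 5
      ret i32 %r
    }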
select-bitext-bitwise-ops.ll
11 ; CHECK-NEXT: [[TMP5:%.*]] = ashr i64 [[Y:%.*]], [[TMP4]]
18 %5 = ashr i64 %y, %3
28 ; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
35 %5 = ashr i64 %y, %3
47 ; CHECK-NEXT: [[TMP5:%.*]] = ashr i64 [[Y:%.*]], [[TMP4]]
54 %5 = ashr i64 %y, %3
64 ; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
71 %5 = ashr i64 %y, %3
79 ; CHECK-NEXT: [[TMP2:%.*]] = ashr exact i32 [[TMP1]], 2
83 ; CHECK-NEXT: [[TMP5:%.*]] = ashr i64 [[Y:%.*]], [[TMP4]]
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/NVPTX/
sext-in-reg.ll
11 %conv1 = ashr exact i64 %sext, 56
13 %conv4 = ashr exact i64 %sext1, 56
14 %shr = ashr i64 %a, 16
15 %shr9 = ashr i64 %b, 16
29 %conv1 = ashr exact i64 %sext, 32
31 %conv4 = ashr exact i64 %sext1, 32
32 %shr = ashr i64 %a, 16
33 %shr9 = ashr i64 %b, 16
47 %conv1 = ashr exact i64 %sext, 48
49 %conv4 = ashr exact i64 %sext1, 48
[all …]
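The shl/ashr pairs in sext-in-reg.ll are the standard sign-extend-in-register idiom: shifting left and then arithmetically right by the same amount sign-extends the low bits in place (the tests can use ashr exact because the shifted-out low bits are known zero after the shl). For example, sign-extending the low 8 bits of an i64:

    define i64 @sext_low8(i64 %a) {
      ; bits 7..0 of %a are sign-extended across the full i64
      %sext = shl i64 %a, 56
      %conv = ashr exact i64 %sext, 56
      ret i64 %conv
    }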
/external/llvm/test/CodeGen/NVPTX/
sext-in-reg.ll
11 %conv1 = ashr exact i64 %sext, 56
13 %conv4 = ashr exact i64 %sext1, 56
14 %shr = ashr i64 %a, 16
15 %shr9 = ashr i64 %b, 16
29 %conv1 = ashr exact i64 %sext, 32
31 %conv4 = ashr exact i64 %sext1, 32
32 %shr = ashr i64 %a, 16
33 %shr9 = ashr i64 %b, 16
47 %conv1 = ashr exact i64 %sext, 48
49 %conv4 = ashr exact i64 %sext1, 48
[all …]
/external/llvm/test/CodeGen/X86/
vshift-3.ll
6 ; Note that x86 does have ashr
12 %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
13 store <2 x i64> %ashr, <2 x i64>* %dst
21 %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
22 store <4 x i32> %ashr, <4 x i32>* %dst
35 %ashr = ashr <4 x i32> %val, %3
36 store <4 x i32> %ashr, <4 x i32>* %dst
44 %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
45 store <8 x i16> %ashr, <8 x i16>* %dst
63 %ashr = ashr <8 x i16> %val, %7
[all …]
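vshift-3.ll checks lowering of constant and splatted vector arithmetic shifts. SSE2 supplies psraw/psrad for 16- and 32-bit elements but no 64-bit-element arithmetic shift, which is why the <2 x i64> case is interesting to test. A minimal case in the same typed-pointer style as these trees:

    define void @ashr_v4i32(<4 x i32> %val, <4 x i32>* %dst) {
      ; a uniform 32-bit arithmetic shift maps directly to psrad on SSE2
      %r = ashr <4 x i32> %val, <i32 5, i32 5, i32 5, i32 5>
      store <4 x i32> %r, <4 x i32>* %dst
      ret void
    }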
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
vshift-3.ll
6 ; Note that x86 does have ashr
13 %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
14 store <2 x i64> %ashr, <2 x i64>* %dst
22 %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
23 store <4 x i32> %ashr, <4 x i32>* %dst
36 %ashr = ashr <4 x i32> %val, %3
37 store <4 x i32> %ashr, <4 x i32>* %dst
45 %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
46 store <8 x i16> %ashr, <8 x i16>* %dst
64 %ashr = ashr <8 x i16> %val, %7
[all …]
/external/llvm/test/CodeGen/SystemZ/
int-neg-02.ll
99 %ashr = ashr i64 %shl, 32
100 %neg = sub i64 0, %ashr
102 %abs = select i1 %cmp, i64 %neg, i64 %ashr
113 %ashr = ashr i64 %shl, 32
114 %neg = sub i64 0, %ashr
116 %abs = select i1 %cmp, i64 %neg, i64 %ashr
127 %ashr = ashr i64 %shl, 32
128 %neg = sub i64 0, %ashr
130 %abs = select i1 %cmp, i64 %ashr, i64 %neg
141 %ashr = ashr i64 %shl, 32
[all …]
int-abs-01.ll
91 %ashr = ashr i64 %shl, 32
92 %neg = sub i64 0, %ashr
94 %abs = select i1 %cmp, i64 %neg, i64 %ashr
104 %ashr = ashr i64 %shl, 32
105 %neg = sub i64 0, %ashr
107 %abs = select i1 %cmp, i64 %neg, i64 %ashr
117 %ashr = ashr i64 %shl, 32
118 %neg = sub i64 0, %ashr
120 %abs = select i1 %cmp, i64 %ashr, i64 %neg
130 %ashr = ashr i64 %shl, 32
[all …]
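Both SystemZ files exercise the same integer-abs idiom visible in the hits: sign-extend the low 32 bits with a shl/ashr pair, negate, then select. A hedged reconstruction (the compare and the select-operand order vary between the individual tests):

    define i64 @abs_low32(i64 %val) {
      %shl  = shl i64 %val, 32
      %ashr = ashr i64 %shl, 32            ; sign-extend the low 32 bits
      %neg  = sub i64 0, %ashr
      %cmp  = icmp slt i64 %ashr, 0
      %abs  = select i1 %cmp, i64 %neg, i64 %ashr
      ret i64 %abs
    }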
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/
int-neg-02.ll
99 %ashr = ashr i64 %shl, 32
100 %neg = sub i64 0, %ashr
102 %abs = select i1 %cmp, i64 %neg, i64 %ashr
113 %ashr = ashr i64 %shl, 32
114 %neg = sub i64 0, %ashr
116 %abs = select i1 %cmp, i64 %neg, i64 %ashr
127 %ashr = ashr i64 %shl, 32
128 %neg = sub i64 0, %ashr
130 %abs = select i1 %cmp, i64 %ashr, i64 %neg
141 %ashr = ashr i64 %shl, 32
[all …]
int-abs-01.ll
91 %ashr = ashr i64 %shl, 32
92 %neg = sub i64 0, %ashr
94 %abs = select i1 %cmp, i64 %neg, i64 %ashr
104 %ashr = ashr i64 %shl, 32
105 %neg = sub i64 0, %ashr
107 %abs = select i1 %cmp, i64 %neg, i64 %ashr
117 %ashr = ashr i64 %shl, 32
118 %neg = sub i64 0, %ashr
120 %abs = select i1 %cmp, i64 %ashr, i64 %neg
130 %ashr = ashr i64 %shl, 32
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/GlobalISel/
inst-select-ashr.mir
5 define void @ashr(i32 addrspace(1)* %global0) {ret void}
9 name: ashr
13 # GCN-LABEL: name: ashr
30 ; ashr ss
34 ; ashr si
38 ; ashr is
42 ; ashr sc
46 ; ashr cs
50 ; ashr vs
54 ; ashr sv
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/XCore/
ashr.ll
2 define i32 @ashr(i32 %a, i32 %b) {
3 %1 = ashr i32 %a, %b
6 ; CHECK: ashr:
7 ; CHECK-NEXT: ashr r0, r0, r1
10 %1 = ashr i32 %a, 24
14 ; CHECK-NEXT: ashr r0, r0, 24
17 %1 = ashr i32 %a, 31
21 ; CHECK-NEXT: ashr r0, r0, 32
32 ; CHECK-NEXT: ashr r0, r0, 32
44 ; CHECK-NEXT: ashr r0, r0, 32
[all …]
/external/llvm/test/CodeGen/XCore/
ashr.ll
2 define i32 @ashr(i32 %a, i32 %b) nounwind {
3 %1 = ashr i32 %a, %b
6 ; CHECK-LABEL: ashr:
7 ; CHECK-NEXT: ashr r0, r0, r1
10 %1 = ashr i32 %a, 24
14 ; CHECK-NEXT: ashr r0, r0, 24
17 %1 = ashr i32 %a, 31
21 ; CHECK-NEXT: ashr r0, r0, 32
32 ; CHECK-NEXT: ashr r0, r0, 32
44 ; CHECK-NEXT: ashr r0, r0, 32
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
sext-in-reg.ll
20 %sext = ashr i32 %shl, 31
38 %ashr = ashr i32 %shl, 24
39 store i32 %ashr, i32 addrspace(1)* %out, align 4
56 %ashr = ashr i32 %shl, 16
57 store i32 %ashr, i32 addrspace(1)* %out, align 4
74 %ashr = ashr <1 x i32> %shl, <i32 24>
75 store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
88 %ashr = ashr i64 %shl, 63
89 store i64 %ashr, i64 addrspace(1)* %out, align 8
102 %ashr = ashr i64 %shl, 56
[all …]
/external/llvm/test/CodeGen/AMDGPU/
sext-in-reg.ll
19 %sext = ashr i32 %shl, 31
37 %ashr = ashr i32 %shl, 24
38 store i32 %ashr, i32 addrspace(1)* %out, align 4
55 %ashr = ashr i32 %shl, 16
56 store i32 %ashr, i32 addrspace(1)* %out, align 4
73 %ashr = ashr <1 x i32> %shl, <i32 24>
74 store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
87 %ashr = ashr i64 %shl, 63
88 store i64 %ashr, i64 addrspace(1)* %out, align 8
101 %ashr = ashr i64 %shl, 56
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/XCore/
ashr.ll
2 define i32 @ashr(i32 %a, i32 %b) nounwind {
3 %1 = ashr i32 %a, %b
6 ; CHECK-LABEL: ashr:
7 ; CHECK-NEXT: ashr r0, r0, r1
10 %1 = ashr i32 %a, 24
14 ; CHECK-NEXT: ashr r0, r0, 24
17 %1 = ashr i32 %a, 31
21 ; CHECK-NEXT: ashr r0, r0, 32
32 ; CHECK-NEXT: ashr r0, r0, 32
44 ; CHECK-NEXT: ashr r0, r0, 32
[all …]
/external/llvm/test/Transforms/InstSimplify/
shr-nop.ll
12 %t = ashr i32 %n, 17
29 %shr = ashr exact i8 0, %a
38 %shr = ashr i8 0, %a
56 %shr = ashr exact i8 0, %a
74 %shr = ashr i8 0, %a
92 %shr = ashr exact i8 -128, %a
119 %shr = ashr exact i8 -128, %a
137 %shr = ashr i8 -128, %a
155 %shr = ashr i8 -128, %a
173 %shr = ashr i8 0, %a
[all …]
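shr-nop.ll exercises InstSimplify folds in which the shift disappears entirely; the recurring 0 and -128 operands probe the values whose ashr results are fully or partially known regardless of the variable shift amount. The simplest of these folds:

    define i8 @ashr_zero(i8 %a) {
      ; zero shifted right by any amount is still zero;
      ; InstSimplify folds the whole function to: ret i8 0
      %shr = ashr i8 0, %a
      ret i8 %shr
    }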
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstSimplify/
shr-nop.ll
12 %t = ashr i32 %n, 17
29 %shr = ashr exact i8 0, %a
38 %shr = ashr i8 0, %a
56 %shr = ashr exact i8 0, %a
74 %shr = ashr i8 0, %a
92 %shr = ashr exact i8 -128, %a
119 %shr = ashr exact i8 -128, %a
137 %shr = ashr i8 -128, %a
155 %shr = ashr i8 -128, %a
173 %shr = ashr i8 0, %a
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
shift_mask.ll
183 %ashr = ashr i8 %a, %rem
184 ret i8 %ashr
195 %ashr = ashr i16 %a, %rem
196 ret i16 %ashr
206 %ashr = ashr i32 %a, %rem
207 ret i32 %ashr
217 %ashr = ashr i64 %a, %rem
218 ret i64 %ashr
227 %ashr = ashr <16 x i8> %a, %rem
228 ret <16 x i8> %ashr
[all …]
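Judging by the %rem operand name, shift_mask.ll feeds each ashr a shift amount that has already been reduced into the type's legal range, and checks whether the backend folds that explicit masking into the hardware's own shift-amount truncation. A hedged sketch of the scalar i32 pattern:

    define i32 @ashr_masked_amount(i32 %a, i32 %b) {
      ; targets whose shift instructions implicitly mask the amount
      ; can fold the 'and' away and shift by %b directly
      %rem = and i32 %b, 31
      %ashr = ashr i32 %a, %rem
      ret i32 %ashr
    }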
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/
shift-ashr.ll
40 ; SSE-NEXT: [[R0:%.*]] = ashr i64 [[A0]], [[B0]]
41 ; SSE-NEXT: [[R1:%.*]] = ashr i64 [[A1]], [[B1]]
42 ; SSE-NEXT: [[R2:%.*]] = ashr i64 [[A2]], [[B2]]
43 ; SSE-NEXT: [[R3:%.*]] = ashr i64 [[A3]], [[B3]]
44 ; SSE-NEXT: [[R4:%.*]] = ashr i64 [[A4]], [[B4]]
45 ; SSE-NEXT: [[R5:%.*]] = ashr i64 [[A5]], [[B5]]
46 ; SSE-NEXT: [[R6:%.*]] = ashr i64 [[A6]], [[B6]]
47 ; SSE-NEXT: [[R7:%.*]] = ashr i64 [[A7]], [[B7]]
75 ; AVX1-NEXT: [[R0:%.*]] = ashr i64 [[A0]], [[B0]]
76 ; AVX1-NEXT: [[R1:%.*]] = ashr i64 [[A1]], [[B1]]
[all …]
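In shift-ashr.ll the SSE and AVX1 CHECK lines list eight scalar i64 ashrs, i.e. the output for those targets keeps the shifts scalar (64-bit vector arithmetic shifts are expensive before AVX-512). The input the SLP vectorizer sees is a straight-line chain of adjacent loads, shifts, and stores, roughly as in this two-lane sketch (function name illustrative, typed-pointer style):

    define void @slp_two_ashr(i64* %a, i64* %b, i64* %r) {
      %a0  = load i64, i64* %a
      %a1p = getelementptr inbounds i64, i64* %a, i64 1
      %a1  = load i64, i64* %a1p
      %b0  = load i64, i64* %b
      %b1p = getelementptr inbounds i64, i64* %b, i64 1
      %b1  = load i64, i64* %b1p
      ; adjacent independent shifts SLP may merge into one <2 x i64> ashr
      %r0  = ashr i64 %a0, %b0
      %r1  = ashr i64 %a1, %b1
      store i64 %r0, i64* %r
      %r1p = getelementptr inbounds i64, i64* %r, i64 1
      store i64 %r1, i64* %r1p
      ret void
    }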
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
vshift-3.ll
8 ; Note that x86 does have ashr
30 %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
31 store <2 x i64> %ashr, <2 x i64>* %dst
49 %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
50 store <4 x i32> %ashr, <4 x i32>* %dst
74 %ashr = ashr <4 x i32> %val, %3
75 store <4 x i32> %ashr, <4 x i32>* %dst
93 %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
94 store <8 x i16> %ashr, <8 x i16>* %dst
124 %ashr = ashr <8 x i16> %val, %7
[all …]
/external/llvm/test/CodeGen/PowerPC/
fast-isel-shifter.ll
35 define i32 @ashr() nounwind {
37 ; ELF64: ashr
39 %ashr = ashr i32 -1, 2
40 ret i32 %ashr
47 %ashr = ashr i32 %src1, %src2
48 ret i32 %ashr
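fast-isel-shifter.ll checks that FastISel, the -O0 instruction selector, emits the expected PowerPC shift for both constant and register amounts. Note that the constant case folds on paper anyway: arithmetic-shifting all-ones just replicates the sign bit, so ashr i32 -1, 2 is still -1.

    define i32 @ashr_const() nounwind {
      ; all-ones stays all-ones under an arithmetic shift
      %ashr = ashr i32 -1, 2
      ret i32 %ashr
    }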
