; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; For pattern (X & (signbit l>> Y)) ==/!= 0
; it may be optimal to fold into (X << Y) >=/< 0
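; Illustrative sketch (not a checked test), assuming Y is less than the bit
; width: (signbit l>> Y) has only bit (BW-1 - Y) set, so and'ing it with X and
; comparing against 0 tests exactly the bit of X that becomes the sign bit of
; (X << Y). The folded form described above would look like:
;   %shl = shl i32 %x, %y
;   %r = icmp sge i32 %shl, 0
; Worked example with i8: X = 0x20, Y = 2 gives signbit l>> Y = 0x20, so
; X & 0x20 is nonzero, and correspondingly X << 2 = 0x80 is negative.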

; Scalar tests

define i1 @scalar_i8_signbit_lshr_and_eq(i8 %x, i8 %y) {
; CHECK-LABEL: @scalar_i8_signbit_lshr_and_eq(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i8 -128, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i8 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i8 128, %y
  %and = and i8 %lshr, %x
  %r = icmp eq i8 %and, 0
  ret i1 %r
}

define i1 @scalar_i16_signbit_lshr_and_eq(i16 %x, i16 %y) {
; CHECK-LABEL: @scalar_i16_signbit_lshr_and_eq(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i16 -32768, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i16 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i16 32768, %y
  %and = and i16 %lshr, %x
  %r = icmp eq i16 %and, 0
  ret i1 %r
}

define i1 @scalar_i32_signbit_lshr_and_eq(i32 %x, i32 %y) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, %x
  %r = icmp eq i32 %and, 0
  ret i1 %r
}

define i1 @scalar_i64_signbit_lshr_and_eq(i64 %x, i64 %y) {
; CHECK-LABEL: @scalar_i64_signbit_lshr_and_eq(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 -9223372036854775808, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i64 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i64 9223372036854775808, %y
  %and = and i64 %lshr, %x
  %r = icmp eq i64 %and, 0
  ret i1 %r
}

define i1 @scalar_i32_signbit_lshr_and_ne(i32 %x, i32 %y) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_ne(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, %x
  %r = icmp ne i32 %and, 0  ; check 'ne' predicate
  ret i1 %r
}

; Vector tests

define <4 x i1> @vec_4xi32_signbit_lshr_and_eq(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
; CHECK-NEXT:    ret <4 x i1> [[R]]
;
  %lshr = lshr <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>, %y
  %and = and <4 x i32> %lshr, %x
  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %r
}

define <4 x i1> @vec_4xi32_signbit_lshr_and_eq_undef1(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq_undef1(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 undef, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
; CHECK-NEXT:    ret <4 x i1> [[R]]
;
  %lshr = lshr <4 x i32> <i32 2147483648, i32 undef, i32 2147483648, i32 2147473648>, %y
  %and = and <4 x i32> %lshr, %x
  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %r
}

define <4 x i1> @vec_4xi32_signbit_lshr_and_eq_undef2(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq_undef2(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
; CHECK-NEXT:    ret <4 x i1> [[R]]
;
  %lshr = lshr <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147473648>, %y
  %and = and <4 x i32> %lshr, %x
  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
  ret <4 x i1> %r
}

define <4 x i1> @vec_4xi32_signbit_lshr_and_eq_undef3(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq_undef3(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 undef, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 undef, i32 0, i32 0, i32 0>
; CHECK-NEXT:    ret <4 x i1> [[R]]
;
  %lshr = lshr <4 x i32> <i32 2147483648, i32 undef, i32 2147483648, i32 2147473648>, %y
  %and = and <4 x i32> %lshr, %x
  %r = icmp eq <4 x i32> %and, <i32 undef, i32 0, i32 0, i32 0>
  ret <4 x i1> %r
}

; Extra use

; Fold happened
define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, i32* %p) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_lshr(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[LSHR]], [[Z:%.*]]
; CHECK-NEXT:    store i32 [[XOR]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %xor = xor i32 %lshr, %z  ; extra use of lshr
  store i32 %xor, i32* %p
  %and = and i32 %lshr, %x
  %r = icmp eq i32 %and, 0
  ret i1 %r
}

; Not fold
define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_and(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
; CHECK-NEXT:    store i32 [[MUL]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, %x
  %mul = mul i32 %and, %z  ; extra use of and
  store i32 %mul, i32* %p
  %r = icmp eq i32 %and, 0
  ret i1 %r
}

; Not fold
define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_lshr_and(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    store i32 [[AND]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[LSHR]], [[Z:%.*]]
; CHECK-NEXT:    store i32 [[ADD]], i32* [[Q:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, %x
  store i32 %and, i32* %p  ; extra use of and
  %add = add i32 %lshr, %z  ; extra use of lshr
  store i32 %add, i32* %q
  %r = icmp eq i32 %and, 0
  ret i1 %r
}

; X is constant

define i1 @scalar_i32_signbit_lshr_and_eq_X_is_constant1(i32 %y) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_X_is_constant1(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], 12345
; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, 12345
  %r = icmp eq i32 %and, 0
  ret i1 %r
}

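; With X == 1, (signbit l>> Y) & 1 is nonzero only when Y == 31 (for in-range
; Y), so the eq-0 compare simplifies to a compare of Y against 31, as the
; CHECK lines below show.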
define i1 @scalar_i32_signbit_lshr_and_eq_X_is_constant2(i32 %y) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_X_is_constant2(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[Y:%.*]], 31
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, 1
  %r = icmp eq i32 %and, 0
  ret i1 %r
}

; Negative tests

; Check 'slt' predicate

define i1 @scalar_i32_signbit_lshr_and_slt(i32 %x, i32 %y) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_slt(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, %x
  %r = icmp slt i32 %and, 0
  ret i1 %r
}

; Compare with nonzero

define i1 @scalar_i32_signbit_lshr_and_eq_nonzero(i32 %x, i32 %y) {
; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_nonzero(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    ret i1 [[R]]
;
  %lshr = lshr i32 2147483648, %y
  %and = and i32 %lshr, %x
  %r = icmp eq i32 %and, 1  ; should be comparing with 0
  ret i1 %r
}